-rw-r--r--  Documentation/networking/00-INDEX | 6
-rw-r--r--  Documentation/networking/dns_resolver.txt | 9
-rw-r--r--  MAINTAINERS | 20
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/alpha/Kconfig | 1
-rw-r--r--  arch/alpha/kernel/irq.c | 13
-rw-r--r--  arch/alpha/kernel/irq_alpha.c | 11
-rw-r--r--  arch/alpha/kernel/irq_i8259.c | 18
-rw-r--r--  arch/alpha/kernel/irq_impl.h | 8
-rw-r--r--  arch/alpha/kernel/irq_pyxis.c | 20
-rw-r--r--  arch/alpha/kernel/irq_srm.c | 16
-rw-r--r--  arch/alpha/kernel/sys_alcor.c | 28
-rw-r--r--  arch/alpha/kernel/sys_cabriolet.c | 16
-rw-r--r--  arch/alpha/kernel/sys_dp264.c | 52
-rw-r--r--  arch/alpha/kernel/sys_eb64p.c | 18
-rw-r--r--  arch/alpha/kernel/sys_eiger.c | 14
-rw-r--r--  arch/alpha/kernel/sys_jensen.c | 24
-rw-r--r--  arch/alpha/kernel/sys_marvel.c | 42
-rw-r--r--  arch/alpha/kernel/sys_mikasa.c | 16
-rw-r--r--  arch/alpha/kernel/sys_noritake.c | 16
-rw-r--r--  arch/alpha/kernel/sys_rawhide.c | 17
-rw-r--r--  arch/alpha/kernel/sys_rx164.c | 16
-rw-r--r--  arch/alpha/kernel/sys_sable.c | 20
-rw-r--r--  arch/alpha/kernel/sys_takara.c | 14
-rw-r--r--  arch/alpha/kernel/sys_titan.c | 22
-rw-r--r--  arch/alpha/kernel/sys_wildfire.c | 32
-rw-r--r--  arch/arm/common/Kconfig | 2
-rw-r--r--  arch/arm/include/asm/mach/arch.h | 4
-rw-r--r--  arch/arm/include/asm/pgalloc.h | 2
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c | 26
-rw-r--r--  arch/arm/kernel/ptrace.c | 6
-rw-r--r--  arch/arm/mach-davinci/cpufreq.c | 2
-rw-r--r--  arch/arm/mach-davinci/devices-da8xx.c | 7
-rw-r--r--  arch/arm/mach-davinci/gpio-tnetv107x.c | 18
-rw-r--r--  arch/arm/mach-davinci/include/mach/clkdev.h | 2
-rw-r--r--  arch/arm/mach-omap2/mailbox.c | 10
-rw-r--r--  arch/arm/mach-omap2/smartreflex.c | 33
-rw-r--r--  arch/arm/mach-pxa/pxa25x.c | 1
-rw-r--r--  arch/arm/mach-pxa/tosa-bt.c | 2
-rw-r--r--  arch/arm/mach-pxa/tosa.c | 6
-rw-r--r--  arch/arm/mach-s3c2440/Kconfig | 1
-rw-r--r--  arch/arm/mach-s3c2440/include/mach/gta02.h | 26
-rw-r--r--  arch/arm/mach-s3c64xx/clock.c | 6
-rw-r--r--  arch/arm/mach-s3c64xx/dma.c | 11
-rw-r--r--  arch/arm/mach-s3c64xx/gpiolib.c | 4
-rw-r--r--  arch/arm/mach-s3c64xx/mach-smdk6410.c | 13
-rw-r--r--  arch/arm/mach-s3c64xx/setup-keypad.c | 2
-rw-r--r--  arch/arm/mach-s3c64xx/setup-sdhci.c | 2
-rw-r--r--  arch/arm/mach-s5p64x0/include/mach/gpio.h | 4
-rw-r--r--  arch/arm/mach-shmobile/board-ag5evm.c | 1
-rw-r--r--  arch/arm/mach-shmobile/board-ap4evb.c | 2
-rw-r--r--  arch/arm/mach-shmobile/board-mackerel.c | 2
-rw-r--r--  arch/arm/mach-shmobile/clock-sh73a0.c | 17
-rw-r--r--  arch/arm/mach-shmobile/include/mach/head-ap4evb.txt | 10
-rw-r--r--  arch/arm/mach-shmobile/include/mach/head-mackerel.txt | 10
-rw-r--r--  arch/arm/plat-samsung/dev-uart.c | 2
-rw-r--r--  arch/blackfin/lib/outs.S | 16
-rw-r--r--  arch/blackfin/mach-common/cache.S | 2
-rw-r--r--  arch/mips/Kconfig | 4
-rw-r--r--  arch/mips/alchemy/mtx-1/board_setup.c | 4
-rw-r--r--  arch/mips/alchemy/mtx-1/platform.c | 9
-rw-r--r--  arch/mips/alchemy/xxs1500/board_setup.c | 4
-rw-r--r--  arch/mips/include/asm/perf_event.h | 12
-rw-r--r--  arch/mips/kernel/ftrace.c | 179
-rw-r--r--  arch/mips/kernel/perf_event.c | 345
-rw-r--r--  arch/mips/kernel/perf_event_mipsxx.c | 4
-rw-r--r--  arch/mips/kernel/signal.c | 2
-rw-r--r--  arch/mips/kernel/signal32.c | 2
-rw-r--r--  arch/mips/kernel/smp.c | 31
-rw-r--r--  arch/mips/kernel/syscall.c | 5
-rw-r--r--  arch/mips/kernel/vpe.c | 4
-rw-r--r--  arch/mips/loongson/Kconfig | 5
-rw-r--r--  arch/mips/loongson/common/cmdline.c | 5
-rw-r--r--  arch/mips/loongson/common/machtype.c | 3
-rw-r--r--  arch/mips/math-emu/ieee754int.h | 4
-rw-r--r--  arch/mips/mm/init.c | 2
-rw-r--r--  arch/mips/mm/tlbex.c | 2
-rw-r--r--  arch/mips/pci/ops-pmcmsp.c | 4
-rw-r--r--  arch/mips/pmc-sierra/Kconfig | 4
-rw-r--r--  arch/mips/pmc-sierra/msp71xx/msp_time.c | 2
-rw-r--r--  arch/mn10300/include/asm/atomic.h | 2
-rw-r--r--  arch/mn10300/include/asm/uaccess.h | 5
-rw-r--r--  arch/mn10300/mm/cache-inv-icache.c | 4
-rw-r--r--  arch/powerpc/include/asm/lppaca.h | 16
-rw-r--r--  arch/powerpc/include/asm/machdep.h | 6
-rw-r--r--  arch/powerpc/kernel/machine_kexec.c | 5
-rw-r--r--  arch/powerpc/kernel/paca.c | 14
-rw-r--r--  arch/powerpc/kernel/process.c | 8
-rw-r--r--  arch/powerpc/mm/numa.c | 3
-rw-r--r--  arch/powerpc/mm/tlb_hash64.c | 6
-rw-r--r--  arch/powerpc/platforms/iseries/dt.c | 6
-rw-r--r--  arch/powerpc/platforms/iseries/setup.c | 1
-rw-r--r--  arch/sh/include/asm/sections.h | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh7750.c | 13
-rw-r--r--  arch/sh/lib/delay.c | 10
-rw-r--r--  arch/sh/mm/cache.c | 3
-rw-r--r--  arch/x86/boot/compressed/mkpiggy.c | 7
-rw-r--r--  arch/x86/include/asm/ce4100.h | 6
-rw-r--r--  arch/x86/include/asm/msr-index.h | 5
-rw-r--r--  arch/x86/include/asm/uv/uv_bau.h | 2
-rw-r--r--  arch/x86/kernel/check.c | 8
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/p4-clockmod.c | 6
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c | 2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 13
-rw-r--r--  arch/x86/mm/fault.c | 14
-rw-r--r--  arch/x86/mm/init_64.c | 6
-rw-r--r--  arch/x86/mm/numa_64.c | 6
-rw-r--r--  arch/x86/mm/pageattr.c | 18
-rw-r--r--  arch/x86/mm/pgtable.c | 11
-rw-r--r--  arch/x86/pci/ce4100.c | 7
-rw-r--r--  arch/x86/platform/ce4100/ce4100.c | 2
-rw-r--r--  arch/x86/platform/olpc/olpc_dt.c | 3
-rw-r--r--  arch/x86/platform/uv/tlb_uv.c | 4
-rw-r--r--  arch/x86/xen/mmu.c | 10
-rw-r--r--  block/blk-core.c | 18
-rw-r--r--  block/blk-flush.c | 8
-rw-r--r--  block/blk-lib.c | 21
-rw-r--r--  block/blk-throttle.c | 29
-rw-r--r--  block/cfq-iosched.c | 6
-rw-r--r--  block/elevator.c | 4
-rw-r--r--  drivers/block/loop.c | 5
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 8
-rw-r--r--  drivers/char/virtio_console.c | 8
-rw-r--r--  drivers/cpufreq/cpufreq.c | 27
-rw-r--r--  drivers/gpio/ml_ioh_gpio.c | 1
-rw-r--r--  drivers/gpio/pch_gpio.c | 1
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 17
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 24
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 37
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 36
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_notifier.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_instmem.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_vm.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_blit_kms.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 3
-rw-r--r--  drivers/hwmon/f71882fg.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-eg20t.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-ocores.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-omap.c | 4
-rw-r--r--  drivers/idle/intel_idle.c | 24
-rw-r--r--  drivers/isdn/hardware/eicon/istream.c | 2
-rw-r--r--  drivers/media/common/tuners/tda8290.c | 14
-rw-r--r--  drivers/media/dvb/dvb-usb/dib0700_devices.c | 21
-rw-r--r--  drivers/media/dvb/dvb-usb/lmedm04.c | 6
-rw-r--r--  drivers/media/dvb/frontends/dib7000m.c | 19
-rw-r--r--  drivers/media/dvb/frontends/dib7000m.h | 15
-rw-r--r--  drivers/media/dvb/mantis/mantis_pci.c | 1
-rw-r--r--  drivers/media/rc/ir-raw.c | 3
-rw-r--r--  drivers/media/rc/mceusb.c | 27
-rw-r--r--  drivers/media/rc/nuvoton-cir.c | 5
-rw-r--r--  drivers/media/rc/nuvoton-cir.h | 7
-rw-r--r--  drivers/media/rc/rc-main.c | 2
-rw-r--r--  drivers/media/video/au0828/au0828-video.c | 28
-rw-r--r--  drivers/media/video/cx18/cx18-cards.c | 50
-rw-r--r--  drivers/media/video/cx18/cx18-driver.c | 25
-rw-r--r--  drivers/media/video/cx18/cx18-driver.h | 3
-rw-r--r--  drivers/media/video/cx18/cx18-dvb.c | 38
-rw-r--r--  drivers/media/video/cx23885/cx23885-i2c.c | 10
-rw-r--r--  drivers/media/video/cx25840/cx25840-core.c | 3
-rw-r--r--  drivers/media/video/ivtv/ivtv-irq.c | 58
-rw-r--r--  drivers/media/video/mem2mem_testdev.c | 1
-rw-r--r--  drivers/media/video/s2255drv.c | 10
-rw-r--r--  drivers/mfd/asic3.c | 4
-rw-r--r--  drivers/mfd/davinci_voicecodec.c | 4
-rw-r--r--  drivers/mfd/tps6586x.c | 10
-rw-r--r--  drivers/mfd/ucb1x00-ts.c | 12
-rw-r--r--  drivers/mfd/wm8994-core.c | 18
-rw-r--r--  drivers/misc/bmp085.c | 1
-rw-r--r--  drivers/mmc/core/core.c | 2
-rw-r--r--  drivers/mmc/core/sdio.c | 3
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0001.c | 43
-rw-r--r--  drivers/mtd/chips/jedec_probe.c | 35
-rw-r--r--  drivers/mtd/maps/amd76xrom.c | 1
-rw-r--r--  drivers/mtd/mtd_blkdevs.c | 1
-rw-r--r--  drivers/mtd/nand/omap2.c | 2
-rw-r--r--  drivers/mtd/onenand/generic.c | 2
-rw-r--r--  drivers/mtd/onenand/omap2.c | 2
-rw-r--r--  drivers/net/ariadne.c | 5
-rw-r--r--  drivers/net/bnx2x/bnx2x.h | 31
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.c | 87
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.h | 29
-rw-r--r--  drivers/net/bnx2x/bnx2x_ethtool.c | 39
-rw-r--r--  drivers/net/bnx2x/bnx2x_init.h | 2
-rw-r--r--  drivers/net/bnx2x/bnx2x_main.c | 37
-rw-r--r--  drivers/net/bnx2x/bnx2x_stats.c | 4
-rw-r--r--  drivers/net/bonding/bond_3ad.c | 32
-rw-r--r--  drivers/net/bonding/bond_3ad.h | 3
-rw-r--r--  drivers/net/can/softing/softing_main.c | 1
-rw-r--r--  drivers/net/cnic.c | 33
-rw-r--r--  drivers/net/davinci_emac.c | 2
-rw-r--r--  drivers/net/dnet.c | 3
-rw-r--r--  drivers/net/e1000/e1000_osdep.h | 3
-rw-r--r--  drivers/net/e1000e/netdev.c | 3
-rw-r--r--  drivers/net/fec.c | 3
-rw-r--r--  drivers/net/igbvf/vf.c | 2
-rw-r--r--  drivers/net/macb.c | 2
-rw-r--r--  drivers/net/macvtap.c | 3
-rw-r--r--  drivers/net/pcmcia/fmvj18x_cs.c | 1
-rw-r--r--  drivers/net/r6040.c | 115
-rw-r--r--  drivers/net/r8169.c | 6
-rw-r--r--  drivers/net/skge.c | 3
-rw-r--r--  drivers/net/smsc911x.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath9k/hif_usb.c | 9
-rw-r--r--  drivers/net/wireless/ath/ath9k/mac.c | 5
-rw-r--r--  drivers/net/wireless/ath/carl9170/usb.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-5000.c | 2
-rw-r--r--  drivers/net/wireless/p54/p54usb.c | 1
-rw-r--r--  drivers/net/wireless/rndis_wlan.c | 3
-rw-r--r--  drivers/of/pdt.c | 112
-rw-r--r--  drivers/pcmcia/pxa2xx_colibri.c | 3
-rw-r--r--  drivers/pps/generators/Kconfig | 2
-rw-r--r--  drivers/rtc/rtc-s3c.c | 12
-rw-r--r--  drivers/s390/block/xpram.c | 4
-rw-r--r--  drivers/s390/char/keyboard.c | 3
-rw-r--r--  drivers/s390/char/tape.h | 8
-rw-r--r--  drivers/s390/char/tape_34xx.c | 59
-rw-r--r--  drivers/s390/char/tape_3590.c | 83
-rw-r--r--  drivers/scsi/scsi_lib.c | 2
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 2
-rw-r--r--  drivers/target/target_core_tmr.c | 5
-rw-r--r--  drivers/target/target_core_transport.c | 8
-rw-r--r--  drivers/tty/serial/serial_cs.c | 1
-rw-r--r--  drivers/usb/gadget/f_phonet.c | 15
-rw-r--r--  drivers/usb/host/ehci-xilinx-of.c | 1
-rw-r--r--  drivers/video/backlight/ltv350qv.c | 9
-rw-r--r--  drivers/watchdog/cpwd.c | 2
-rw-r--r--  drivers/watchdog/hpwdt.c | 4
-rw-r--r--  drivers/watchdog/sbc_fitpc2_wdt.c | 7
-rw-r--r--  drivers/watchdog/sch311x_wdt.c | 2
-rw-r--r--  drivers/watchdog/w83697ug_wdt.c | 2
-rw-r--r--  fs/btrfs/ctree.h | 9
-rw-r--r--  fs/btrfs/extent-tree.c | 35
-rw-r--r--  fs/btrfs/extent_io.c | 33
-rw-r--r--  fs/btrfs/file.c | 114
-rw-r--r--  fs/btrfs/inode.c | 6
-rw-r--r--  fs/ceph/dir.c | 30
-rw-r--r--  fs/ceph/inode.c | 2
-rw-r--r--  fs/ceph/super.h | 1
-rw-r--r--  fs/compat.c | 8
-rw-r--r--  fs/dcache.c | 26
-rw-r--r--  fs/exofs/namei.c | 8
-rw-r--r--  fs/ext2/namei.c | 9
-rw-r--r--  fs/fat/namei_vfat.c | 4
-rw-r--r--  fs/fuse/dir.c | 2
-rw-r--r--  fs/gfs2/dentry.c | 2
-rw-r--r--  fs/hfs/dir.c | 50
-rw-r--r--  fs/jfs/namei.c | 2
-rw-r--r--  fs/minix/namei.c | 8
-rw-r--r--  fs/namei.c | 14
-rw-r--r--  fs/nfs/inode.c | 7
-rw-r--r--  fs/nfs/nfs4_fs.h | 10
-rw-r--r--  fs/nfs/nfs4filelayoutdev.c | 4
-rw-r--r--  fs/nfs/nfs4proc.c | 131
-rw-r--r--  fs/nfs/nfs4state.c | 29
-rw-r--r--  fs/nfs/nfs4xdr.c | 4
-rw-r--r--  fs/nfs/nfsroot.c | 29
-rw-r--r--  fs/nfs/unlink.c | 2
-rw-r--r--  fs/nfs/write.c | 2
-rw-r--r--  fs/nfsd/nfs4callback.c | 2
-rw-r--r--  fs/nfsd/nfs4state.c | 13
-rw-r--r--  fs/nfsd/nfs4xdr.c | 4
-rw-r--r--  fs/nilfs2/namei.c | 8
-rw-r--r--  fs/nilfs2/segment.c | 3
-rw-r--r--  fs/ocfs2/dcache.c | 2
-rw-r--r--  fs/open.c | 8
-rw-r--r--  fs/partitions/osf.c | 12
-rw-r--r--  fs/proc/base.c | 30
-rw-r--r--  fs/proc/inode.c | 8
-rw-r--r--  fs/proc/proc_devtree.c | 2
-rw-r--r--  fs/proc/proc_sysctl.c | 7
-rw-r--r--  fs/reiserfs/namei.c | 2
-rw-r--r--  fs/reiserfs/xattr.c | 2
-rw-r--r--  fs/sysv/namei.c | 8
-rw-r--r--  fs/udf/namei.c | 11
-rw-r--r--  fs/ufs/namei.c | 9
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl.c | 11
-rw-r--r--  include/keys/rxrpc-type.h | 1
-rw-r--r--  include/linux/blkdev.h | 5
-rw-r--r--  include/linux/blktrace_api.h | 1
-rw-r--r--  include/linux/ceph/messenger.h | 2
-rw-r--r--  include/linux/gfp.h | 11
-rw-r--r--  include/linux/mfd/wm8994/core.h | 1
-rw-r--r--  include/linux/netdevice.h | 3
-rw-r--r--  include/linux/nfs_fs_sb.h | 10
-rw-r--r--  include/linux/ptrace.h | 3
-rw-r--r--  include/linux/sunrpc/sched.h | 1
-rw-r--r--  include/linux/sysctl.h | 14
-rw-r--r--  include/target/target_core_transport.h | 2
-rw-r--r--  include/trace/events/block.h | 6
-rw-r--r--  kernel/cpuset.c | 7
-rw-r--r--  kernel/ptrace.c | 6
-rw-r--r--  kernel/sched.c | 1
-rw-r--r--  kernel/sched_rt.c | 14
-rw-r--r--  kernel/sysctl.c | 15
-rw-r--r--  kernel/trace/blktrace.c | 16
-rw-r--r--  lib/nlattr.c | 2
-rw-r--r--  mm/huge_memory.c | 34
-rw-r--r--  mm/mempolicy.c | 14
-rw-r--r--  mm/rmap.c | 54
-rw-r--r--  net/Makefile | 4
-rw-r--r--  net/bridge/Kconfig | 1
-rw-r--r--  net/ceph/messenger.c | 71
-rw-r--r--  net/ceph/pagevec.c | 18
-rw-r--r--  net/core/dev.c | 12
-rw-r--r--  net/core/dev_addr_lists.c | 2
-rw-r--r--  net/core/pktgen.c | 2
-rw-r--r--  net/dcb/dcbnl.c | 2
-rw-r--r--  net/dccp/input.c | 7
-rw-r--r--  net/dns_resolver/dns_key.c | 20
-rw-r--r--  net/ipv4/devinet.c | 6
-rw-r--r--  net/ipv4/ip_gre.c | 2
-rw-r--r--  net/ipv4/ipip.c | 2
-rw-r--r--  net/ipv6/ip6_tunnel.c | 1
-rw-r--r--  net/ipv6/route.c | 21
-rw-r--r--  net/ipv6/sit.c | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 4
-rw-r--r--  net/netfilter/nf_log.c | 4
-rw-r--r--  net/netlink/af_netlink.c | 18
-rw-r--r--  net/rds/ib_send.c | 5
-rw-r--r--  net/rds/loop.c | 11
-rw-r--r--  net/rxrpc/ar-input.c | 1
-rw-r--r--  net/sunrpc/sched.c | 75
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_transport.c | 1
-rw-r--r--  net/sunrpc/xprtsock.c | 3
-rw-r--r--  net/unix/af_unix.c | 17
-rw-r--r--  scripts/basic/fixdep.c | 19
-rw-r--r--  scripts/mod/sumversion.c | 19
-rw-r--r--  sound/pci/hda/patch_cirrus.c | 2
-rw-r--r--  sound/pci/hda/patch_hdmi.c | 5
-rw-r--r--  sound/pci/hda/patch_realtek.c | 9
-rw-r--r--  sound/soc/codecs/wm8978.c | 14
-rw-r--r--  sound/soc/codecs/wm8994.c | 61
-rw-r--r--  sound/soc/codecs/wm9081.c | 5
-rw-r--r--  sound/soc/omap/am3517evm.c | 2
-rw-r--r--  sound/soc/soc-dapm.c | 2
-rw-r--r--  tools/perf/util/header.c | 11
-rw-r--r--  tools/perf/util/symbol.c | 2
360 files changed, 2965 insertions, 1795 deletions
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index fe5c099b8fc8..4edd78dfb362 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -40,8 +40,6 @@ decnet.txt
40 - info on using the DECnet networking layer in Linux. 40 - info on using the DECnet networking layer in Linux.
41depca.txt 41depca.txt
42 - the Digital DEPCA/EtherWORKS DE1?? and DE2?? LANCE Ethernet driver 42 - the Digital DEPCA/EtherWORKS DE1?? and DE2?? LANCE Ethernet driver
43dgrs.txt
44 - the Digi International RightSwitch SE-X Ethernet driver
45dmfe.txt 43dmfe.txt
46 - info on the Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver. 44 - info on the Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver.
47e100.txt 45e100.txt
@@ -50,8 +48,6 @@ e1000.txt
50 - info on Intel's E1000 line of gigabit ethernet boards 48 - info on Intel's E1000 line of gigabit ethernet boards
51eql.txt 49eql.txt
52 - serial IP load balancing 50 - serial IP load balancing
53ethertap.txt
54 - the Ethertap user space packet reception and transmission driver
55ewrk3.txt 51ewrk3.txt
56 - the Digital EtherWORKS 3 DE203/4/5 Ethernet driver 52 - the Digital EtherWORKS 3 DE203/4/5 Ethernet driver
57filter.txt 53filter.txt
@@ -104,8 +100,6 @@ tuntap.txt
104 - TUN/TAP device driver, allowing user space Rx/Tx of packets. 100 - TUN/TAP device driver, allowing user space Rx/Tx of packets.
105vortex.txt 101vortex.txt
106 - info on using 3Com Vortex (3c590, 3c592, 3c595, 3c597) Ethernet cards. 102 - info on using 3Com Vortex (3c590, 3c592, 3c595, 3c597) Ethernet cards.
107wavelan.txt
108 - AT&T GIS (nee NCR) WaveLAN card: An Ethernet-like radio transceiver
109x25.txt 103x25.txt
110 - general info on X.25 development. 104 - general info on X.25 development.
111x25-iface.txt 105x25-iface.txt
diff --git a/Documentation/networking/dns_resolver.txt b/Documentation/networking/dns_resolver.txt
index aefd1e681804..04ca06325b08 100644
--- a/Documentation/networking/dns_resolver.txt
+++ b/Documentation/networking/dns_resolver.txt
@@ -61,7 +61,6 @@ before the more general line given above as the first match is the one taken.
61 create dns_resolver foo:* * /usr/sbin/dns.foo %k 61 create dns_resolver foo:* * /usr/sbin/dns.foo %k
62 62
63 63
64
65===== 64=====
66USAGE 65USAGE
67===== 66=====
@@ -104,6 +103,14 @@ implemented in the module can be called after doing:
104 returned also. 103 returned also.
105 104
106 105
106===============================
107READING DNS KEYS FROM USERSPACE
108===============================
109
110Keys of dns_resolver type can be read from userspace using keyctl_read() or
111"keyctl read/print/pipe".
112
113
107========= 114=========
108MECHANISM 115MECHANISM
109========= 116=========
diff --git a/MAINTAINERS b/MAINTAINERS
index 8afba6321e24..f1bc3dc6b369 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1010,6 +1010,15 @@ L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
1010S: Maintained 1010S: Maintained
1011F: arch/arm/mach-s5p*/ 1011F: arch/arm/mach-s5p*/
1012 1012
1013ARM/SAMSUNG MOBILE MACHINE SUPPORT
1014M: Kyungmin Park <kyungmin.park@samsung.com>
1015L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1016S: Maintained
1017F: arch/arm/mach-s5pv210/mach-aquila.c
1018F: arch/arm/mach-s5pv210/mach-goni.c
1019F: arch/arm/mach-exynos4/mach-universal_c210.c
1020F: arch/arm/mach-exynos4/mach-nuri.c
1021
1013ARM/SAMSUNG S5P SERIES FIMC SUPPORT 1022ARM/SAMSUNG S5P SERIES FIMC SUPPORT
1014M: Kyungmin Park <kyungmin.park@samsung.com> 1023M: Kyungmin Park <kyungmin.park@samsung.com>
1015M: Sylwester Nawrocki <s.nawrocki@samsung.com> 1024M: Sylwester Nawrocki <s.nawrocki@samsung.com>
@@ -1467,6 +1476,7 @@ F: include/net/bluetooth/
1467 1476
1468BONDING DRIVER 1477BONDING DRIVER
1469M: Jay Vosburgh <fubar@us.ibm.com> 1478M: Jay Vosburgh <fubar@us.ibm.com>
1479M: Andy Gospodarek <andy@greyhouse.net>
1470L: netdev@vger.kernel.org 1480L: netdev@vger.kernel.org
1471W: http://sourceforge.net/projects/bonding/ 1481W: http://sourceforge.net/projects/bonding/
1472S: Supported 1482S: Supported
@@ -2033,7 +2043,7 @@ F: Documentation/scsi/dc395x.txt
2033F: drivers/scsi/dc395x.* 2043F: drivers/scsi/dc395x.*
2034 2044
2035DCCP PROTOCOL 2045DCCP PROTOCOL
2036M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net> 2046M: Gerrit Renker <gerrit@erg.abdn.ac.uk>
2037L: dccp@vger.kernel.org 2047L: dccp@vger.kernel.org
2038W: http://www.linuxfoundation.org/collaborate/workgroups/networking/dccp 2048W: http://www.linuxfoundation.org/collaborate/workgroups/networking/dccp
2039S: Maintained 2049S: Maintained
@@ -3519,7 +3529,7 @@ F: drivers/hwmon/jc42.c
3519F: Documentation/hwmon/jc42 3529F: Documentation/hwmon/jc42
3520 3530
3521JFS FILESYSTEM 3531JFS FILESYSTEM
3522M: Dave Kleikamp <shaggy@linux.vnet.ibm.com> 3532M: Dave Kleikamp <shaggy@kernel.org>
3523L: jfs-discussion@lists.sourceforge.net 3533L: jfs-discussion@lists.sourceforge.net
3524W: http://jfs.sourceforge.net/ 3534W: http://jfs.sourceforge.net/
3525T: git git://git.kernel.org/pub/scm/linux/kernel/git/shaggy/jfs-2.6.git 3535T: git git://git.kernel.org/pub/scm/linux/kernel/git/shaggy/jfs-2.6.git
@@ -4282,10 +4292,7 @@ S: Maintained
4282F: net/sched/sch_netem.c 4292F: net/sched/sch_netem.c
4283 4293
4284NETERION 10GbE DRIVERS (s2io/vxge) 4294NETERION 10GbE DRIVERS (s2io/vxge)
4285M: Ramkrishna Vepa <ramkrishna.vepa@exar.com> 4295M: Jon Mason <jdmason@kudzu.us>
4286M: Sivakumar Subramani <sivakumar.subramani@exar.com>
4287M: Sreenivasa Honnur <sreenivasa.honnur@exar.com>
4288M: Jon Mason <jon.mason@exar.com>
4289L: netdev@vger.kernel.org 4296L: netdev@vger.kernel.org
4290W: http://trac.neterion.com/cgi-bin/trac.cgi/wiki/Linux?Anonymous 4297W: http://trac.neterion.com/cgi-bin/trac.cgi/wiki/Linux?Anonymous
4291W: http://trac.neterion.com/cgi-bin/trac.cgi/wiki/X3100Linux?Anonymous 4298W: http://trac.neterion.com/cgi-bin/trac.cgi/wiki/X3100Linux?Anonymous
@@ -5171,6 +5178,7 @@ F: drivers/char/random.c
5171 5178
5172RAPIDIO SUBSYSTEM 5179RAPIDIO SUBSYSTEM
5173M: Matt Porter <mporter@kernel.crashing.org> 5180M: Matt Porter <mporter@kernel.crashing.org>
5181M: Alexandre Bounine <alexandre.bounine@idt.com>
5174S: Maintained 5182S: Maintained
5175F: drivers/rapidio/ 5183F: drivers/rapidio/
5176 5184
diff --git a/Makefile b/Makefile
index 2f7d92255b57..d6592b63c8cb 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 2 1VERSION = 2
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 38 3SUBLEVEL = 38
4EXTRAVERSION = -rc7 4EXTRAVERSION =
5NAME = Flesh-Eating Bats with Fangs 5NAME = Flesh-Eating Bats with Fangs
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 47f63d480141..cc31bec2e316 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -11,6 +11,7 @@ config ALPHA
11 select HAVE_GENERIC_HARDIRQS 11 select HAVE_GENERIC_HARDIRQS
12 select GENERIC_IRQ_PROBE 12 select GENERIC_IRQ_PROBE
13 select AUTO_IRQ_AFFINITY if SMP 13 select AUTO_IRQ_AFFINITY if SMP
14 select GENERIC_HARDIRQS_NO_DEPRECATED
14 help 15 help
15 The Alpha is a 64-bit general-purpose processor designed and 16 The Alpha is a 64-bit general-purpose processor designed and
16 marketed by the Digital Equipment Corporation of blessed memory, 17 marketed by the Digital Equipment Corporation of blessed memory,
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index 9ab234f48dd8..a19d60082299 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -44,11 +44,16 @@ static char irq_user_affinity[NR_IRQS];
44 44
45int irq_select_affinity(unsigned int irq) 45int irq_select_affinity(unsigned int irq)
46{ 46{
47 struct irq_desc *desc = irq_to_desc[irq]; 47 struct irq_data *data = irq_get_irq_data(irq);
48 struct irq_chip *chip;
48 static int last_cpu; 49 static int last_cpu;
49 int cpu = last_cpu + 1; 50 int cpu = last_cpu + 1;
50 51
51 if (!desc || !get_irq_desc_chip(desc)->set_affinity || irq_user_affinity[irq]) 52 if (!data)
53 return 1;
54 chip = irq_data_get_irq_chip(data);
55
56 if (!chip->irq_set_affinity || irq_user_affinity[irq])
52 return 1; 57 return 1;
53 58
54 while (!cpu_possible(cpu) || 59 while (!cpu_possible(cpu) ||
@@ -56,8 +61,8 @@ int irq_select_affinity(unsigned int irq)
56 cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0); 61 cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
57 last_cpu = cpu; 62 last_cpu = cpu;
58 63
59 cpumask_copy(desc->affinity, cpumask_of(cpu)); 64 cpumask_copy(data->affinity, cpumask_of(cpu));
60 get_irq_desc_chip(desc)->set_affinity(irq, cpumask_of(cpu)); 65 chip->irq_set_affinity(data, cpumask_of(cpu), false);
61 return 0; 66 return 0;
62} 67}
63#endif /* CONFIG_SMP */ 68#endif /* CONFIG_SMP */
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c
index 2d0679b60939..411ca11d0a18 100644
--- a/arch/alpha/kernel/irq_alpha.c
+++ b/arch/alpha/kernel/irq_alpha.c
@@ -228,14 +228,9 @@ struct irqaction timer_irqaction = {
228void __init 228void __init
229init_rtc_irq(void) 229init_rtc_irq(void)
230{ 230{
231 struct irq_desc *desc = irq_to_desc(RTC_IRQ); 231 set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
232 232 handle_simple_irq, "RTC");
233 if (desc) { 233 setup_irq(RTC_IRQ, &timer_irqaction);
234 desc->status |= IRQ_DISABLED;
235 set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
236 handle_simple_irq, "RTC");
237 setup_irq(RTC_IRQ, &timer_irqaction);
238 }
239} 234}
240 235
241/* Dummy irqactions. */ 236/* Dummy irqactions. */
diff --git a/arch/alpha/kernel/irq_i8259.c b/arch/alpha/kernel/irq_i8259.c
index 956ea0ed1694..c7cc9813e45f 100644
--- a/arch/alpha/kernel/irq_i8259.c
+++ b/arch/alpha/kernel/irq_i8259.c
@@ -33,10 +33,10 @@ i8259_update_irq_hw(unsigned int irq, unsigned long mask)
33} 33}
34 34
35inline void 35inline void
36i8259a_enable_irq(unsigned int irq) 36i8259a_enable_irq(struct irq_data *d)
37{ 37{
38 spin_lock(&i8259_irq_lock); 38 spin_lock(&i8259_irq_lock);
39 i8259_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq)); 39 i8259_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq));
40 spin_unlock(&i8259_irq_lock); 40 spin_unlock(&i8259_irq_lock);
41} 41}
42 42
@@ -47,16 +47,18 @@ __i8259a_disable_irq(unsigned int irq)
47} 47}
48 48
49void 49void
50i8259a_disable_irq(unsigned int irq) 50i8259a_disable_irq(struct irq_data *d)
51{ 51{
52 spin_lock(&i8259_irq_lock); 52 spin_lock(&i8259_irq_lock);
53 __i8259a_disable_irq(irq); 53 __i8259a_disable_irq(d->irq);
54 spin_unlock(&i8259_irq_lock); 54 spin_unlock(&i8259_irq_lock);
55} 55}
56 56
57void 57void
58i8259a_mask_and_ack_irq(unsigned int irq) 58i8259a_mask_and_ack_irq(struct irq_data *d)
59{ 59{
60 unsigned int irq = d->irq;
61
60 spin_lock(&i8259_irq_lock); 62 spin_lock(&i8259_irq_lock);
61 __i8259a_disable_irq(irq); 63 __i8259a_disable_irq(irq);
62 64
@@ -71,9 +73,9 @@ i8259a_mask_and_ack_irq(unsigned int irq)
71 73
72struct irq_chip i8259a_irq_type = { 74struct irq_chip i8259a_irq_type = {
73 .name = "XT-PIC", 75 .name = "XT-PIC",
74 .unmask = i8259a_enable_irq, 76 .irq_unmask = i8259a_enable_irq,
75 .mask = i8259a_disable_irq, 77 .irq_mask = i8259a_disable_irq,
76 .mask_ack = i8259a_mask_and_ack_irq, 78 .irq_mask_ack = i8259a_mask_and_ack_irq,
77}; 79};
78 80
79void __init 81void __init
diff --git a/arch/alpha/kernel/irq_impl.h b/arch/alpha/kernel/irq_impl.h
index b63ccd7386f1..d507a234b05d 100644
--- a/arch/alpha/kernel/irq_impl.h
+++ b/arch/alpha/kernel/irq_impl.h
@@ -31,11 +31,9 @@ extern void init_rtc_irq(void);
31 31
32extern void common_init_isa_dma(void); 32extern void common_init_isa_dma(void);
33 33
34extern void i8259a_enable_irq(unsigned int); 34extern void i8259a_enable_irq(struct irq_data *d);
35extern void i8259a_disable_irq(unsigned int); 35extern void i8259a_disable_irq(struct irq_data *d);
36extern void i8259a_mask_and_ack_irq(unsigned int); 36extern void i8259a_mask_and_ack_irq(struct irq_data *d);
37extern unsigned int i8259a_startup_irq(unsigned int);
38extern void i8259a_end_irq(unsigned int);
39extern struct irq_chip i8259a_irq_type; 37extern struct irq_chip i8259a_irq_type;
40extern void init_i8259a_irqs(void); 38extern void init_i8259a_irqs(void);
41 39
diff --git a/arch/alpha/kernel/irq_pyxis.c b/arch/alpha/kernel/irq_pyxis.c
index 2863458c853e..b30227fa7f5f 100644
--- a/arch/alpha/kernel/irq_pyxis.c
+++ b/arch/alpha/kernel/irq_pyxis.c
@@ -29,21 +29,21 @@ pyxis_update_irq_hw(unsigned long mask)
29} 29}
30 30
31static inline void 31static inline void
32pyxis_enable_irq(unsigned int irq) 32pyxis_enable_irq(struct irq_data *d)
33{ 33{
34 pyxis_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16)); 34 pyxis_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
35} 35}
36 36
37static void 37static void
38pyxis_disable_irq(unsigned int irq) 38pyxis_disable_irq(struct irq_data *d)
39{ 39{
40 pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16))); 40 pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
41} 41}
42 42
43static void 43static void
44pyxis_mask_and_ack_irq(unsigned int irq) 44pyxis_mask_and_ack_irq(struct irq_data *d)
45{ 45{
46 unsigned long bit = 1UL << (irq - 16); 46 unsigned long bit = 1UL << (d->irq - 16);
47 unsigned long mask = cached_irq_mask &= ~bit; 47 unsigned long mask = cached_irq_mask &= ~bit;
48 48
49 /* Disable the interrupt. */ 49 /* Disable the interrupt. */
@@ -58,9 +58,9 @@ pyxis_mask_and_ack_irq(unsigned int irq)
58 58
59static struct irq_chip pyxis_irq_type = { 59static struct irq_chip pyxis_irq_type = {
60 .name = "PYXIS", 60 .name = "PYXIS",
61 .mask_ack = pyxis_mask_and_ack_irq, 61 .irq_mask_ack = pyxis_mask_and_ack_irq,
62 .mask = pyxis_disable_irq, 62 .irq_mask = pyxis_disable_irq,
63 .unmask = pyxis_enable_irq, 63 .irq_unmask = pyxis_enable_irq,
64}; 64};
65 65
66void 66void
@@ -103,7 +103,7 @@ init_pyxis_irqs(unsigned long ignore_mask)
103 if ((ignore_mask >> i) & 1) 103 if ((ignore_mask >> i) & 1)
104 continue; 104 continue;
105 set_irq_chip_and_handler(i, &pyxis_irq_type, handle_level_irq); 105 set_irq_chip_and_handler(i, &pyxis_irq_type, handle_level_irq);
106 irq_to_desc(i)->status |= IRQ_LEVEL; 106 irq_set_status_flags(i, IRQ_LEVEL);
107 } 107 }
108 108
109 setup_irq(16+7, &isa_cascade_irqaction); 109 setup_irq(16+7, &isa_cascade_irqaction);
diff --git a/arch/alpha/kernel/irq_srm.c b/arch/alpha/kernel/irq_srm.c
index 0e57e828b413..82a47bba41c4 100644
--- a/arch/alpha/kernel/irq_srm.c
+++ b/arch/alpha/kernel/irq_srm.c
@@ -18,27 +18,27 @@
18DEFINE_SPINLOCK(srm_irq_lock); 18DEFINE_SPINLOCK(srm_irq_lock);
19 19
20static inline void 20static inline void
21srm_enable_irq(unsigned int irq) 21srm_enable_irq(struct irq_data *d)
22{ 22{
23 spin_lock(&srm_irq_lock); 23 spin_lock(&srm_irq_lock);
24 cserve_ena(irq - 16); 24 cserve_ena(d->irq - 16);
25 spin_unlock(&srm_irq_lock); 25 spin_unlock(&srm_irq_lock);
26} 26}
27 27
28static void 28static void
29srm_disable_irq(unsigned int irq) 29srm_disable_irq(struct irq_data *d)
30{ 30{
31 spin_lock(&srm_irq_lock); 31 spin_lock(&srm_irq_lock);
32 cserve_dis(irq - 16); 32 cserve_dis(d->irq - 16);
33 spin_unlock(&srm_irq_lock); 33 spin_unlock(&srm_irq_lock);
34} 34}
35 35
36/* Handle interrupts from the SRM, assuming no additional weirdness. */ 36/* Handle interrupts from the SRM, assuming no additional weirdness. */
37static struct irq_chip srm_irq_type = { 37static struct irq_chip srm_irq_type = {
38 .name = "SRM", 38 .name = "SRM",
39 .unmask = srm_enable_irq, 39 .irq_unmask = srm_enable_irq,
40 .mask = srm_disable_irq, 40 .irq_mask = srm_disable_irq,
41 .mask_ack = srm_disable_irq, 41 .irq_mask_ack = srm_disable_irq,
42}; 42};
43 43
44void __init 44void __init
@@ -52,7 +52,7 @@ init_srm_irqs(long max, unsigned long ignore_mask)
52 if (i < 64 && ((ignore_mask >> i) & 1)) 52 if (i < 64 && ((ignore_mask >> i) & 1))
53 continue; 53 continue;
54 set_irq_chip_and_handler(i, &srm_irq_type, handle_level_irq); 54 set_irq_chip_and_handler(i, &srm_irq_type, handle_level_irq);
55 irq_to_desc(i)->status |= IRQ_LEVEL; 55 irq_set_status_flags(i, IRQ_LEVEL);
56 } 56 }
57} 57}
58 58
diff --git a/arch/alpha/kernel/sys_alcor.c b/arch/alpha/kernel/sys_alcor.c
index 7bef61768236..88d95e872f55 100644
--- a/arch/alpha/kernel/sys_alcor.c
+++ b/arch/alpha/kernel/sys_alcor.c
@@ -44,31 +44,31 @@ alcor_update_irq_hw(unsigned long mask)
44} 44}
45 45
46static inline void 46static inline void
47alcor_enable_irq(unsigned int irq) 47alcor_enable_irq(struct irq_data *d)
48{ 48{
49 alcor_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16)); 49 alcor_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
50} 50}
51 51
52static void 52static void
53alcor_disable_irq(unsigned int irq) 53alcor_disable_irq(struct irq_data *d)
54{ 54{
55 alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16))); 55 alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
56} 56}
57 57
58static void 58static void
59alcor_mask_and_ack_irq(unsigned int irq) 59alcor_mask_and_ack_irq(struct irq_data *d)
60{ 60{
61 alcor_disable_irq(irq); 61 alcor_disable_irq(d);
62 62
63 /* On ALCOR/XLT, need to dismiss interrupt via GRU. */ 63 /* On ALCOR/XLT, need to dismiss interrupt via GRU. */
64 *(vuip)GRU_INT_CLEAR = 1 << (irq - 16); mb(); 64 *(vuip)GRU_INT_CLEAR = 1 << (d->irq - 16); mb();
65 *(vuip)GRU_INT_CLEAR = 0; mb(); 65 *(vuip)GRU_INT_CLEAR = 0; mb();
66} 66}
67 67
68static void 68static void
69alcor_isa_mask_and_ack_irq(unsigned int irq) 69alcor_isa_mask_and_ack_irq(struct irq_data *d)
70{ 70{
71 i8259a_mask_and_ack_irq(irq); 71 i8259a_mask_and_ack_irq(d);
72 72
73 /* On ALCOR/XLT, need to dismiss interrupt via GRU. */ 73 /* On ALCOR/XLT, need to dismiss interrupt via GRU. */
74 *(vuip)GRU_INT_CLEAR = 0x80000000; mb(); 74 *(vuip)GRU_INT_CLEAR = 0x80000000; mb();
@@ -77,9 +77,9 @@ alcor_isa_mask_and_ack_irq(unsigned int irq)
77 77
78static struct irq_chip alcor_irq_type = { 78static struct irq_chip alcor_irq_type = {
79 .name = "ALCOR", 79 .name = "ALCOR",
80 .unmask = alcor_enable_irq, 80 .irq_unmask = alcor_enable_irq,
81 .mask = alcor_disable_irq, 81 .irq_mask = alcor_disable_irq,
82 .mask_ack = alcor_mask_and_ack_irq, 82 .irq_mask_ack = alcor_mask_and_ack_irq,
83}; 83};
84 84
85static void 85static void
@@ -126,9 +126,9 @@ alcor_init_irq(void)
126 if (i >= 16+20 && i <= 16+30) 126 if (i >= 16+20 && i <= 16+30)
127 continue; 127 continue;
128 set_irq_chip_and_handler(i, &alcor_irq_type, handle_level_irq); 128 set_irq_chip_and_handler(i, &alcor_irq_type, handle_level_irq);
129 irq_to_desc(i)->status |= IRQ_LEVEL; 129 irq_set_status_flags(i, IRQ_LEVEL);
130 } 130 }
131 i8259a_irq_type.ack = alcor_isa_mask_and_ack_irq; 131 i8259a_irq_type.irq_ack = alcor_isa_mask_and_ack_irq;
132 132
133 init_i8259a_irqs(); 133 init_i8259a_irqs();
134 common_init_isa_dma(); 134 common_init_isa_dma();
diff --git a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c
index b0c916493aea..57eb6307bc27 100644
--- a/arch/alpha/kernel/sys_cabriolet.c
+++ b/arch/alpha/kernel/sys_cabriolet.c
@@ -46,22 +46,22 @@ cabriolet_update_irq_hw(unsigned int irq, unsigned long mask)
46} 46}
47 47
48static inline void 48static inline void
49cabriolet_enable_irq(unsigned int irq) 49cabriolet_enable_irq(struct irq_data *d)
50{ 50{
51 cabriolet_update_irq_hw(irq, cached_irq_mask &= ~(1UL << irq)); 51 cabriolet_update_irq_hw(d->irq, cached_irq_mask &= ~(1UL << d->irq));
52} 52}
53 53
54static void 54static void
55cabriolet_disable_irq(unsigned int irq) 55cabriolet_disable_irq(struct irq_data *d)
56{ 56{
57 cabriolet_update_irq_hw(irq, cached_irq_mask |= 1UL << irq); 57 cabriolet_update_irq_hw(d->irq, cached_irq_mask |= 1UL << d->irq);
58} 58}
59 59
60static struct irq_chip cabriolet_irq_type = { 60static struct irq_chip cabriolet_irq_type = {
61 .name = "CABRIOLET", 61 .name = "CABRIOLET",
62 .unmask = cabriolet_enable_irq, 62 .irq_unmask = cabriolet_enable_irq,
63 .mask = cabriolet_disable_irq, 63 .irq_mask = cabriolet_disable_irq,
64 .mask_ack = cabriolet_disable_irq, 64 .irq_mask_ack = cabriolet_disable_irq,
65}; 65};
66 66
67static void 67static void
@@ -107,7 +107,7 @@ common_init_irq(void (*srm_dev_int)(unsigned long v))
107 for (i = 16; i < 35; ++i) { 107 for (i = 16; i < 35; ++i) {
108 set_irq_chip_and_handler(i, &cabriolet_irq_type, 108 set_irq_chip_and_handler(i, &cabriolet_irq_type,
109 handle_level_irq); 109 handle_level_irq);
110 irq_to_desc(i)->status |= IRQ_LEVEL; 110 irq_set_status_flags(i, IRQ_LEVEL);
111 } 111 }
112 } 112 }
113 113
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c
index edad5f759ccd..481df4ecb651 100644
--- a/arch/alpha/kernel/sys_dp264.c
+++ b/arch/alpha/kernel/sys_dp264.c
@@ -98,37 +98,37 @@ tsunami_update_irq_hw(unsigned long mask)
98} 98}
99 99
100static void 100static void
101dp264_enable_irq(unsigned int irq) 101dp264_enable_irq(struct irq_data *d)
102{ 102{
103 spin_lock(&dp264_irq_lock); 103 spin_lock(&dp264_irq_lock);
104 cached_irq_mask |= 1UL << irq; 104 cached_irq_mask |= 1UL << d->irq;
105 tsunami_update_irq_hw(cached_irq_mask); 105 tsunami_update_irq_hw(cached_irq_mask);
106 spin_unlock(&dp264_irq_lock); 106 spin_unlock(&dp264_irq_lock);
107} 107}
108 108
109static void 109static void
110dp264_disable_irq(unsigned int irq) 110dp264_disable_irq(struct irq_data *d)
111{ 111{
112 spin_lock(&dp264_irq_lock); 112 spin_lock(&dp264_irq_lock);
113 cached_irq_mask &= ~(1UL << irq); 113 cached_irq_mask &= ~(1UL << d->irq);
114 tsunami_update_irq_hw(cached_irq_mask); 114 tsunami_update_irq_hw(cached_irq_mask);
115 spin_unlock(&dp264_irq_lock); 115 spin_unlock(&dp264_irq_lock);
116} 116}
117 117
118static void 118static void
119clipper_enable_irq(unsigned int irq) 119clipper_enable_irq(struct irq_data *d)
120{ 120{
121 spin_lock(&dp264_irq_lock); 121 spin_lock(&dp264_irq_lock);
122 cached_irq_mask |= 1UL << (irq - 16); 122 cached_irq_mask |= 1UL << (d->irq - 16);
123 tsunami_update_irq_hw(cached_irq_mask); 123 tsunami_update_irq_hw(cached_irq_mask);
124 spin_unlock(&dp264_irq_lock); 124 spin_unlock(&dp264_irq_lock);
125} 125}
126 126
127static void 127static void
128clipper_disable_irq(unsigned int irq) 128clipper_disable_irq(struct irq_data *d)
129{ 129{
130 spin_lock(&dp264_irq_lock); 130 spin_lock(&dp264_irq_lock);
131 cached_irq_mask &= ~(1UL << (irq - 16)); 131 cached_irq_mask &= ~(1UL << (d->irq - 16));
132 tsunami_update_irq_hw(cached_irq_mask); 132 tsunami_update_irq_hw(cached_irq_mask);
133 spin_unlock(&dp264_irq_lock); 133 spin_unlock(&dp264_irq_lock);
134} 134}
@@ -149,10 +149,11 @@ cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
149} 149}
150 150
151static int 151static int
152dp264_set_affinity(unsigned int irq, const struct cpumask *affinity) 152dp264_set_affinity(struct irq_data *d, const struct cpumask *affinity,
153{ 153 bool force)
154{
154 spin_lock(&dp264_irq_lock); 155 spin_lock(&dp264_irq_lock);
155 cpu_set_irq_affinity(irq, *affinity); 156 cpu_set_irq_affinity(d->irq, *affinity);
156 tsunami_update_irq_hw(cached_irq_mask); 157 tsunami_update_irq_hw(cached_irq_mask);
157 spin_unlock(&dp264_irq_lock); 158 spin_unlock(&dp264_irq_lock);
158 159
@@ -160,10 +161,11 @@ dp264_set_affinity(unsigned int irq, const struct cpumask *affinity)
160} 161}
161 162
162static int 163static int
163clipper_set_affinity(unsigned int irq, const struct cpumask *affinity) 164clipper_set_affinity(struct irq_data *d, const struct cpumask *affinity,
164{ 165 bool force)
166{
165 spin_lock(&dp264_irq_lock); 167 spin_lock(&dp264_irq_lock);
166 cpu_set_irq_affinity(irq - 16, *affinity); 168 cpu_set_irq_affinity(d->irq - 16, *affinity);
167 tsunami_update_irq_hw(cached_irq_mask); 169 tsunami_update_irq_hw(cached_irq_mask);
168 spin_unlock(&dp264_irq_lock); 170 spin_unlock(&dp264_irq_lock);
169 171
@@ -171,19 +173,19 @@ clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
171} 173}
172 174
173static struct irq_chip dp264_irq_type = { 175static struct irq_chip dp264_irq_type = {
174 .name = "DP264", 176 .name = "DP264",
175 .unmask = dp264_enable_irq, 177 .irq_unmask = dp264_enable_irq,
176 .mask = dp264_disable_irq, 178 .irq_mask = dp264_disable_irq,
177 .mask_ack = dp264_disable_irq, 179 .irq_mask_ack = dp264_disable_irq,
178 .set_affinity = dp264_set_affinity, 180 .irq_set_affinity = dp264_set_affinity,
179}; 181};
180 182
181static struct irq_chip clipper_irq_type = { 183static struct irq_chip clipper_irq_type = {
182 .name = "CLIPPER", 184 .name = "CLIPPER",
183 .unmask = clipper_enable_irq, 185 .irq_unmask = clipper_enable_irq,
184 .mask = clipper_disable_irq, 186 .irq_mask = clipper_disable_irq,
185 .mask_ack = clipper_disable_irq, 187 .irq_mask_ack = clipper_disable_irq,
186 .set_affinity = clipper_set_affinity, 188 .irq_set_affinity = clipper_set_affinity,
187}; 189};
188 190
189static void 191static void
@@ -268,8 +270,8 @@ init_tsunami_irqs(struct irq_chip * ops, int imin, int imax)
268{ 270{
269 long i; 271 long i;
270 for (i = imin; i <= imax; ++i) { 272 for (i = imin; i <= imax; ++i) {
271 irq_to_desc(i)->status |= IRQ_LEVEL;
272 set_irq_chip_and_handler(i, ops, handle_level_irq); 273 set_irq_chip_and_handler(i, ops, handle_level_irq);
274 irq_set_status_flags(i, IRQ_LEVEL);
273 } 275 }
274} 276}
275 277
diff --git a/arch/alpha/kernel/sys_eb64p.c b/arch/alpha/kernel/sys_eb64p.c
index ae5f29d127b0..402e908ffb3e 100644
--- a/arch/alpha/kernel/sys_eb64p.c
+++ b/arch/alpha/kernel/sys_eb64p.c
@@ -44,22 +44,22 @@ eb64p_update_irq_hw(unsigned int irq, unsigned long mask)
44} 44}
45 45
46static inline void 46static inline void
47eb64p_enable_irq(unsigned int irq) 47eb64p_enable_irq(struct irq_data *d)
48{ 48{
49 eb64p_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq)); 49 eb64p_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq));
50} 50}
51 51
52static void 52static void
53eb64p_disable_irq(unsigned int irq) 53eb64p_disable_irq(struct irq_data *d)
54{ 54{
55 eb64p_update_irq_hw(irq, cached_irq_mask |= 1 << irq); 55 eb64p_update_irq_hw(d->irq, cached_irq_mask |= 1 << d->irq);
56} 56}
57 57
58static struct irq_chip eb64p_irq_type = { 58static struct irq_chip eb64p_irq_type = {
59 .name = "EB64P", 59 .name = "EB64P",
60 .unmask = eb64p_enable_irq, 60 .irq_unmask = eb64p_enable_irq,
61 .mask = eb64p_disable_irq, 61 .irq_mask = eb64p_disable_irq,
62 .mask_ack = eb64p_disable_irq, 62 .irq_mask_ack = eb64p_disable_irq,
63}; 63};
64 64
65static void 65static void
@@ -118,9 +118,9 @@ eb64p_init_irq(void)
118 init_i8259a_irqs(); 118 init_i8259a_irqs();
119 119
120 for (i = 16; i < 32; ++i) { 120 for (i = 16; i < 32; ++i) {
121 irq_to_desc(i)->status |= IRQ_LEVEL;
122 set_irq_chip_and_handler(i, &eb64p_irq_type, handle_level_irq); 121 set_irq_chip_and_handler(i, &eb64p_irq_type, handle_level_irq);
123 } 122 irq_set_status_flags(i, IRQ_LEVEL);
123 }
124 124
125 common_init_isa_dma(); 125 common_init_isa_dma();
126 setup_irq(16+5, &isa_cascade_irqaction); 126 setup_irq(16+5, &isa_cascade_irqaction);
diff --git a/arch/alpha/kernel/sys_eiger.c b/arch/alpha/kernel/sys_eiger.c
index 1121bc5c6c6c..0b44a54c1522 100644
--- a/arch/alpha/kernel/sys_eiger.c
+++ b/arch/alpha/kernel/sys_eiger.c
@@ -51,16 +51,18 @@ eiger_update_irq_hw(unsigned long irq, unsigned long mask)
51} 51}
52 52
53static inline void 53static inline void
54eiger_enable_irq(unsigned int irq) 54eiger_enable_irq(struct irq_data *d)
55{ 55{
56 unsigned int irq = d->irq;
56 unsigned long mask; 57 unsigned long mask;
57 mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63))); 58 mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63)));
58 eiger_update_irq_hw(irq, mask); 59 eiger_update_irq_hw(irq, mask);
59} 60}
60 61
61static void 62static void
62eiger_disable_irq(unsigned int irq) 63eiger_disable_irq(struct irq_data *d)
63{ 64{
65 unsigned int irq = d->irq;
64 unsigned long mask; 66 unsigned long mask;
65 mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63)); 67 mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63));
66 eiger_update_irq_hw(irq, mask); 68 eiger_update_irq_hw(irq, mask);
@@ -68,9 +70,9 @@ eiger_disable_irq(unsigned int irq)
68 70
69static struct irq_chip eiger_irq_type = { 71static struct irq_chip eiger_irq_type = {
70 .name = "EIGER", 72 .name = "EIGER",
71 .unmask = eiger_enable_irq, 73 .irq_unmask = eiger_enable_irq,
72 .mask = eiger_disable_irq, 74 .irq_mask = eiger_disable_irq,
73 .mask_ack = eiger_disable_irq, 75 .irq_mask_ack = eiger_disable_irq,
74}; 76};
75 77
76static void 78static void
@@ -136,8 +138,8 @@ eiger_init_irq(void)
136 init_i8259a_irqs(); 138 init_i8259a_irqs();
137 139
138 for (i = 16; i < 128; ++i) { 140 for (i = 16; i < 128; ++i) {
139 irq_to_desc(i)->status |= IRQ_LEVEL;
140 set_irq_chip_and_handler(i, &eiger_irq_type, handle_level_irq); 141 set_irq_chip_and_handler(i, &eiger_irq_type, handle_level_irq);
142 irq_set_status_flags(i, IRQ_LEVEL);
141 } 143 }
142} 144}
143 145
diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c
index 34f55e03d331..00341b75c8b2 100644
--- a/arch/alpha/kernel/sys_jensen.c
+++ b/arch/alpha/kernel/sys_jensen.c
@@ -63,34 +63,34 @@
63 */ 63 */
64 64
65static void 65static void
66jensen_local_enable(unsigned int irq) 66jensen_local_enable(struct irq_data *d)
67{ 67{
68 /* the parport is really hw IRQ 1, silly Jensen. */ 68 /* the parport is really hw IRQ 1, silly Jensen. */
69 if (irq == 7) 69 if (d->irq == 7)
70 i8259a_enable_irq(1); 70 i8259a_enable_irq(d);
71} 71}
72 72
73static void 73static void
74jensen_local_disable(unsigned int irq) 74jensen_local_disable(struct irq_data *d)
75{ 75{
76 /* the parport is really hw IRQ 1, silly Jensen. */ 76 /* the parport is really hw IRQ 1, silly Jensen. */
77 if (irq == 7) 77 if (d->irq == 7)
78 i8259a_disable_irq(1); 78 i8259a_disable_irq(d);
79} 79}
80 80
81static void 81static void
82jensen_local_mask_ack(unsigned int irq) 82jensen_local_mask_ack(struct irq_data *d)
83{ 83{
84 /* the parport is really hw IRQ 1, silly Jensen. */ 84 /* the parport is really hw IRQ 1, silly Jensen. */
85 if (irq == 7) 85 if (d->irq == 7)
86 i8259a_mask_and_ack_irq(1); 86 i8259a_mask_and_ack_irq(d);
87} 87}
88 88
89static struct irq_chip jensen_local_irq_type = { 89static struct irq_chip jensen_local_irq_type = {
90 .name = "LOCAL", 90 .name = "LOCAL",
91 .unmask = jensen_local_enable, 91 .irq_unmask = jensen_local_enable,
92 .mask = jensen_local_disable, 92 .irq_mask = jensen_local_disable,
93 .mask_ack = jensen_local_mask_ack, 93 .irq_mask_ack = jensen_local_mask_ack,
94}; 94};
95 95
96static void 96static void
diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c
index 2bfc9f1b1ddc..e61910734e41 100644
--- a/arch/alpha/kernel/sys_marvel.c
+++ b/arch/alpha/kernel/sys_marvel.c
@@ -104,9 +104,10 @@ io7_get_irq_ctl(unsigned int irq, struct io7 **pio7)
104} 104}
105 105
106static void 106static void
107io7_enable_irq(unsigned int irq) 107io7_enable_irq(struct irq_data *d)
108{ 108{
109 volatile unsigned long *ctl; 109 volatile unsigned long *ctl;
110 unsigned int irq = d->irq;
110 struct io7 *io7; 111 struct io7 *io7;
111 112
112 ctl = io7_get_irq_ctl(irq, &io7); 113 ctl = io7_get_irq_ctl(irq, &io7);
@@ -115,7 +116,7 @@ io7_enable_irq(unsigned int irq)
115 __func__, irq); 116 __func__, irq);
116 return; 117 return;
117 } 118 }
118 119
119 spin_lock(&io7->irq_lock); 120 spin_lock(&io7->irq_lock);
120 *ctl |= 1UL << 24; 121 *ctl |= 1UL << 24;
121 mb(); 122 mb();
@@ -124,9 +125,10 @@ io7_enable_irq(unsigned int irq)
124} 125}
125 126
126static void 127static void
127io7_disable_irq(unsigned int irq) 128io7_disable_irq(struct irq_data *d)
128{ 129{
129 volatile unsigned long *ctl; 130 volatile unsigned long *ctl;
131 unsigned int irq = d->irq;
130 struct io7 *io7; 132 struct io7 *io7;
131 133
132 ctl = io7_get_irq_ctl(irq, &io7); 134 ctl = io7_get_irq_ctl(irq, &io7);
@@ -135,7 +137,7 @@ io7_disable_irq(unsigned int irq)
135 __func__, irq); 137 __func__, irq);
136 return; 138 return;
137 } 139 }
138 140
139 spin_lock(&io7->irq_lock); 141 spin_lock(&io7->irq_lock);
140 *ctl &= ~(1UL << 24); 142 *ctl &= ~(1UL << 24);
141 mb(); 143 mb();
@@ -144,35 +146,29 @@ io7_disable_irq(unsigned int irq)
144} 146}
145 147
146static void 148static void
147marvel_irq_noop(unsigned int irq) 149marvel_irq_noop(struct irq_data *d)
148{ 150{
149 return; 151 return;
150}
151
152static unsigned int
153marvel_irq_noop_return(unsigned int irq)
154{
155 return 0;
156} 152}
157 153
158static struct irq_chip marvel_legacy_irq_type = { 154static struct irq_chip marvel_legacy_irq_type = {
159 .name = "LEGACY", 155 .name = "LEGACY",
160 .mask = marvel_irq_noop, 156 .irq_mask = marvel_irq_noop,
161 .unmask = marvel_irq_noop, 157 .irq_unmask = marvel_irq_noop,
162}; 158};
163 159
164static struct irq_chip io7_lsi_irq_type = { 160static struct irq_chip io7_lsi_irq_type = {
165 .name = "LSI", 161 .name = "LSI",
166 .unmask = io7_enable_irq, 162 .irq_unmask = io7_enable_irq,
167 .mask = io7_disable_irq, 163 .irq_mask = io7_disable_irq,
168 .mask_ack = io7_disable_irq, 164 .irq_mask_ack = io7_disable_irq,
169}; 165};
170 166
171static struct irq_chip io7_msi_irq_type = { 167static struct irq_chip io7_msi_irq_type = {
172 .name = "MSI", 168 .name = "MSI",
173 .unmask = io7_enable_irq, 169 .irq_unmask = io7_enable_irq,
174 .mask = io7_disable_irq, 170 .irq_mask = io7_disable_irq,
175 .ack = marvel_irq_noop, 171 .irq_ack = marvel_irq_noop,
176}; 172};
177 173
178static void 174static void
@@ -280,8 +276,8 @@ init_io7_irqs(struct io7 *io7,
280 276
281 /* Set up the lsi irqs. */ 277 /* Set up the lsi irqs. */
282 for (i = 0; i < 128; ++i) { 278 for (i = 0; i < 128; ++i) {
283 irq_to_desc(base + i)->status |= IRQ_LEVEL;
284 set_irq_chip_and_handler(base + i, lsi_ops, handle_level_irq); 279 set_irq_chip_and_handler(base + i, lsi_ops, handle_level_irq);
280 irq_set_status_flags(i, IRQ_LEVEL);
285 } 281 }
286 282
287 /* Disable the implemented irqs in hardware. */ 283 /* Disable the implemented irqs in hardware. */
@@ -294,8 +290,8 @@ init_io7_irqs(struct io7 *io7,
294 290
295 /* Set up the msi irqs. */ 291 /* Set up the msi irqs. */
296 for (i = 128; i < (128 + 512); ++i) { 292 for (i = 128; i < (128 + 512); ++i) {
297 irq_to_desc(base + i)->status |= IRQ_LEVEL;
298 set_irq_chip_and_handler(base + i, msi_ops, handle_level_irq); 293 set_irq_chip_and_handler(base + i, msi_ops, handle_level_irq);
294 irq_set_status_flags(i, IRQ_LEVEL);
299 } 295 }
300 296
301 for (i = 0; i < 16; ++i) 297 for (i = 0; i < 16; ++i)
diff --git a/arch/alpha/kernel/sys_mikasa.c b/arch/alpha/kernel/sys_mikasa.c
index bcc1639e8efb..cf7f43dd3147 100644
--- a/arch/alpha/kernel/sys_mikasa.c
+++ b/arch/alpha/kernel/sys_mikasa.c
@@ -43,22 +43,22 @@ mikasa_update_irq_hw(int mask)
43} 43}
44 44
45static inline void 45static inline void
46mikasa_enable_irq(unsigned int irq) 46mikasa_enable_irq(struct irq_data *d)
47{ 47{
48 mikasa_update_irq_hw(cached_irq_mask |= 1 << (irq - 16)); 48 mikasa_update_irq_hw(cached_irq_mask |= 1 << (d->irq - 16));
49} 49}
50 50
51static void 51static void
52mikasa_disable_irq(unsigned int irq) 52mikasa_disable_irq(struct irq_data *d)
53{ 53{
54 mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (irq - 16))); 54 mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (d->irq - 16)));
55} 55}
56 56
57static struct irq_chip mikasa_irq_type = { 57static struct irq_chip mikasa_irq_type = {
58 .name = "MIKASA", 58 .name = "MIKASA",
59 .unmask = mikasa_enable_irq, 59 .irq_unmask = mikasa_enable_irq,
60 .mask = mikasa_disable_irq, 60 .irq_mask = mikasa_disable_irq,
61 .mask_ack = mikasa_disable_irq, 61 .irq_mask_ack = mikasa_disable_irq,
62}; 62};
63 63
64static void 64static void
@@ -98,8 +98,8 @@ mikasa_init_irq(void)
98 mikasa_update_irq_hw(0); 98 mikasa_update_irq_hw(0);
99 99
100 for (i = 16; i < 32; ++i) { 100 for (i = 16; i < 32; ++i) {
101 irq_to_desc(i)->status |= IRQ_LEVEL;
102 set_irq_chip_and_handler(i, &mikasa_irq_type, handle_level_irq); 101 set_irq_chip_and_handler(i, &mikasa_irq_type, handle_level_irq);
102 irq_set_status_flags(i, IRQ_LEVEL);
103 } 103 }
104 104
105 init_i8259a_irqs(); 105 init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_noritake.c b/arch/alpha/kernel/sys_noritake.c
index e88f4ae1260e..92bc188e94a9 100644
--- a/arch/alpha/kernel/sys_noritake.c
+++ b/arch/alpha/kernel/sys_noritake.c
@@ -48,22 +48,22 @@ noritake_update_irq_hw(int irq, int mask)
48} 48}
49 49
50static void 50static void
51noritake_enable_irq(unsigned int irq) 51noritake_enable_irq(struct irq_data *d)
52{ 52{
53 noritake_update_irq_hw(irq, cached_irq_mask |= 1 << (irq - 16)); 53 noritake_update_irq_hw(d->irq, cached_irq_mask |= 1 << (d->irq - 16));
54} 54}
55 55
56static void 56static void
57noritake_disable_irq(unsigned int irq) 57noritake_disable_irq(struct irq_data *d)
58{ 58{
59 noritake_update_irq_hw(irq, cached_irq_mask &= ~(1 << (irq - 16))); 59 noritake_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << (d->irq - 16)));
60} 60}
61 61
62static struct irq_chip noritake_irq_type = { 62static struct irq_chip noritake_irq_type = {
63 .name = "NORITAKE", 63 .name = "NORITAKE",
64 .unmask = noritake_enable_irq, 64 .irq_unmask = noritake_enable_irq,
65 .mask = noritake_disable_irq, 65 .irq_mask = noritake_disable_irq,
66 .mask_ack = noritake_disable_irq, 66 .irq_mask_ack = noritake_disable_irq,
67}; 67};
68 68
69static void 69static void
@@ -127,8 +127,8 @@ noritake_init_irq(void)
127 outw(0, 0x54c); 127 outw(0, 0x54c);
128 128
129 for (i = 16; i < 48; ++i) { 129 for (i = 16; i < 48; ++i) {
130 irq_to_desc(i)->status |= IRQ_LEVEL;
131 set_irq_chip_and_handler(i, &noritake_irq_type, handle_level_irq); 130 set_irq_chip_and_handler(i, &noritake_irq_type, handle_level_irq);
131 irq_set_status_flags(i, IRQ_LEVEL);
132 } 132 }
133 133
134 init_i8259a_irqs(); 134 init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_rawhide.c b/arch/alpha/kernel/sys_rawhide.c
index 6a51364dd1cc..936d4140ed5f 100644
--- a/arch/alpha/kernel/sys_rawhide.c
+++ b/arch/alpha/kernel/sys_rawhide.c
@@ -56,9 +56,10 @@ rawhide_update_irq_hw(int hose, int mask)
56 (((h) < MCPCIA_MAX_HOSES) && (cached_irq_masks[(h)] != 0)) 56 (((h) < MCPCIA_MAX_HOSES) && (cached_irq_masks[(h)] != 0))
57 57
58static inline void 58static inline void
59rawhide_enable_irq(unsigned int irq) 59rawhide_enable_irq(struct irq_data *d)
60{ 60{
61 unsigned int mask, hose; 61 unsigned int mask, hose;
62 unsigned int irq = d->irq;
62 63
63 irq -= 16; 64 irq -= 16;
64 hose = irq / 24; 65 hose = irq / 24;
@@ -76,9 +77,10 @@ rawhide_enable_irq(unsigned int irq)
76} 77}
77 78
78static void 79static void
79rawhide_disable_irq(unsigned int irq) 80rawhide_disable_irq(struct irq_data *d)
80{ 81{
81 unsigned int mask, hose; 82 unsigned int mask, hose;
83 unsigned int irq = d->irq;
82 84
83 irq -= 16; 85 irq -= 16;
84 hose = irq / 24; 86 hose = irq / 24;
@@ -96,9 +98,10 @@ rawhide_disable_irq(unsigned int irq)
96} 98}
97 99
98static void 100static void
99rawhide_mask_and_ack_irq(unsigned int irq) 101rawhide_mask_and_ack_irq(struct irq_data *d)
100{ 102{
101 unsigned int mask, mask1, hose; 103 unsigned int mask, mask1, hose;
104 unsigned int irq = d->irq;
102 105
103 irq -= 16; 106 irq -= 16;
104 hose = irq / 24; 107 hose = irq / 24;
@@ -123,9 +126,9 @@ rawhide_mask_and_ack_irq(unsigned int irq)
123 126
124static struct irq_chip rawhide_irq_type = { 127static struct irq_chip rawhide_irq_type = {
125 .name = "RAWHIDE", 128 .name = "RAWHIDE",
126 .unmask = rawhide_enable_irq, 129 .irq_unmask = rawhide_enable_irq,
127 .mask = rawhide_disable_irq, 130 .irq_mask = rawhide_disable_irq,
128 .mask_ack = rawhide_mask_and_ack_irq, 131 .irq_mask_ack = rawhide_mask_and_ack_irq,
129}; 132};
130 133
131static void 134static void
@@ -177,8 +180,8 @@ rawhide_init_irq(void)
177 } 180 }
178 181
179 for (i = 16; i < 128; ++i) { 182 for (i = 16; i < 128; ++i) {
180 irq_to_desc(i)->status |= IRQ_LEVEL;
181 set_irq_chip_and_handler(i, &rawhide_irq_type, handle_level_irq); 183 set_irq_chip_and_handler(i, &rawhide_irq_type, handle_level_irq);
184 irq_set_status_flags(i, IRQ_LEVEL);
182 } 185 }
183 186
184 init_i8259a_irqs(); 187 init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_rx164.c b/arch/alpha/kernel/sys_rx164.c
index 89e7e37ec84c..cea22a62913b 100644
--- a/arch/alpha/kernel/sys_rx164.c
+++ b/arch/alpha/kernel/sys_rx164.c
@@ -47,22 +47,22 @@ rx164_update_irq_hw(unsigned long mask)
47} 47}
48 48
49static inline void 49static inline void
50rx164_enable_irq(unsigned int irq) 50rx164_enable_irq(struct irq_data *d)
51{ 51{
52 rx164_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16)); 52 rx164_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
53} 53}
54 54
55static void 55static void
56rx164_disable_irq(unsigned int irq) 56rx164_disable_irq(struct irq_data *d)
57{ 57{
58 rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16))); 58 rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
59} 59}
60 60
61static struct irq_chip rx164_irq_type = { 61static struct irq_chip rx164_irq_type = {
62 .name = "RX164", 62 .name = "RX164",
63 .unmask = rx164_enable_irq, 63 .irq_unmask = rx164_enable_irq,
64 .mask = rx164_disable_irq, 64 .irq_mask = rx164_disable_irq,
65 .mask_ack = rx164_disable_irq, 65 .irq_mask_ack = rx164_disable_irq,
66}; 66};
67 67
68static void 68static void
@@ -99,8 +99,8 @@ rx164_init_irq(void)
99 99
100 rx164_update_irq_hw(0); 100 rx164_update_irq_hw(0);
101 for (i = 16; i < 40; ++i) { 101 for (i = 16; i < 40; ++i) {
102 irq_to_desc(i)->status |= IRQ_LEVEL;
103 set_irq_chip_and_handler(i, &rx164_irq_type, handle_level_irq); 102 set_irq_chip_and_handler(i, &rx164_irq_type, handle_level_irq);
103 irq_set_status_flags(i, IRQ_LEVEL);
104 } 104 }
105 105
106 init_i8259a_irqs(); 106 init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c
index 5c4423d1b06c..a349538aabc9 100644
--- a/arch/alpha/kernel/sys_sable.c
+++ b/arch/alpha/kernel/sys_sable.c
@@ -443,11 +443,11 @@ lynx_swizzle(struct pci_dev *dev, u8 *pinp)
443/* GENERIC irq routines */ 443/* GENERIC irq routines */
444 444
445static inline void 445static inline void
446sable_lynx_enable_irq(unsigned int irq) 446sable_lynx_enable_irq(struct irq_data *d)
447{ 447{
448 unsigned long bit, mask; 448 unsigned long bit, mask;
449 449
450 bit = sable_lynx_irq_swizzle->irq_to_mask[irq]; 450 bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
451 spin_lock(&sable_lynx_irq_lock); 451 spin_lock(&sable_lynx_irq_lock);
452 mask = sable_lynx_irq_swizzle->shadow_mask &= ~(1UL << bit); 452 mask = sable_lynx_irq_swizzle->shadow_mask &= ~(1UL << bit);
453 sable_lynx_irq_swizzle->update_irq_hw(bit, mask); 453 sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -459,11 +459,11 @@ sable_lynx_enable_irq(unsigned int irq)
459} 459}
460 460
461static void 461static void
462sable_lynx_disable_irq(unsigned int irq) 462sable_lynx_disable_irq(struct irq_data *d)
463{ 463{
464 unsigned long bit, mask; 464 unsigned long bit, mask;
465 465
466 bit = sable_lynx_irq_swizzle->irq_to_mask[irq]; 466 bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
467 spin_lock(&sable_lynx_irq_lock); 467 spin_lock(&sable_lynx_irq_lock);
468 mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit; 468 mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
469 sable_lynx_irq_swizzle->update_irq_hw(bit, mask); 469 sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -475,11 +475,11 @@ sable_lynx_disable_irq(unsigned int irq)
475} 475}
476 476
477static void 477static void
478sable_lynx_mask_and_ack_irq(unsigned int irq) 478sable_lynx_mask_and_ack_irq(struct irq_data *d)
479{ 479{
480 unsigned long bit, mask; 480 unsigned long bit, mask;
481 481
482 bit = sable_lynx_irq_swizzle->irq_to_mask[irq]; 482 bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
483 spin_lock(&sable_lynx_irq_lock); 483 spin_lock(&sable_lynx_irq_lock);
484 mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit; 484 mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
485 sable_lynx_irq_swizzle->update_irq_hw(bit, mask); 485 sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -489,9 +489,9 @@ sable_lynx_mask_and_ack_irq(unsigned int irq)
489 489
490static struct irq_chip sable_lynx_irq_type = { 490static struct irq_chip sable_lynx_irq_type = {
491 .name = "SABLE/LYNX", 491 .name = "SABLE/LYNX",
492 .unmask = sable_lynx_enable_irq, 492 .irq_unmask = sable_lynx_enable_irq,
493 .mask = sable_lynx_disable_irq, 493 .irq_mask = sable_lynx_disable_irq,
494 .mask_ack = sable_lynx_mask_and_ack_irq, 494 .irq_mask_ack = sable_lynx_mask_and_ack_irq,
495}; 495};
496 496
497static void 497static void
@@ -518,9 +518,9 @@ sable_lynx_init_irq(int nr_of_irqs)
518 long i; 518 long i;
519 519
520 for (i = 0; i < nr_of_irqs; ++i) { 520 for (i = 0; i < nr_of_irqs; ++i) {
521 irq_to_desc(i)->status |= IRQ_LEVEL;
522 set_irq_chip_and_handler(i, &sable_lynx_irq_type, 521 set_irq_chip_and_handler(i, &sable_lynx_irq_type,
523 handle_level_irq); 522 handle_level_irq);
523 irq_set_status_flags(i, IRQ_LEVEL);
524 } 524 }
525 525
526 common_init_isa_dma(); 526 common_init_isa_dma();
diff --git a/arch/alpha/kernel/sys_takara.c b/arch/alpha/kernel/sys_takara.c
index f8a1e8a862fb..42a5331f13c4 100644
--- a/arch/alpha/kernel/sys_takara.c
+++ b/arch/alpha/kernel/sys_takara.c
@@ -45,16 +45,18 @@ takara_update_irq_hw(unsigned long irq, unsigned long mask)
45} 45}
46 46
47static inline void 47static inline void
48takara_enable_irq(unsigned int irq) 48takara_enable_irq(struct irq_data *d)
49{ 49{
50 unsigned int irq = d->irq;
50 unsigned long mask; 51 unsigned long mask;
51 mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63))); 52 mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63)));
52 takara_update_irq_hw(irq, mask); 53 takara_update_irq_hw(irq, mask);
53} 54}
54 55
55static void 56static void
56takara_disable_irq(unsigned int irq) 57takara_disable_irq(struct irq_data *d)
57{ 58{
59 unsigned int irq = d->irq;
58 unsigned long mask; 60 unsigned long mask;
59 mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63)); 61 mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63));
60 takara_update_irq_hw(irq, mask); 62 takara_update_irq_hw(irq, mask);
@@ -62,9 +64,9 @@ takara_disable_irq(unsigned int irq)
62 64
63static struct irq_chip takara_irq_type = { 65static struct irq_chip takara_irq_type = {
64 .name = "TAKARA", 66 .name = "TAKARA",
65 .unmask = takara_enable_irq, 67 .irq_unmask = takara_enable_irq,
66 .mask = takara_disable_irq, 68 .irq_mask = takara_disable_irq,
67 .mask_ack = takara_disable_irq, 69 .irq_mask_ack = takara_disable_irq,
68}; 70};
69 71
70static void 72static void
@@ -136,8 +138,8 @@ takara_init_irq(void)
136 takara_update_irq_hw(i, -1); 138 takara_update_irq_hw(i, -1);
137 139
138 for (i = 16; i < 128; ++i) { 140 for (i = 16; i < 128; ++i) {
139 irq_to_desc(i)->status |= IRQ_LEVEL;
140 set_irq_chip_and_handler(i, &takara_irq_type, handle_level_irq); 141 set_irq_chip_and_handler(i, &takara_irq_type, handle_level_irq);
142 irq_set_status_flags(i, IRQ_LEVEL);
141 } 143 }
142 144
143 common_init_isa_dma(); 145 common_init_isa_dma();
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c
index e02494bf5ef3..8c13a0c77830 100644
--- a/arch/alpha/kernel/sys_titan.c
+++ b/arch/alpha/kernel/sys_titan.c
@@ -112,8 +112,9 @@ titan_update_irq_hw(unsigned long mask)
112} 112}
113 113
114static inline void 114static inline void
115titan_enable_irq(unsigned int irq) 115titan_enable_irq(struct irq_data *d)
116{ 116{
117 unsigned int irq = d->irq;
117 spin_lock(&titan_irq_lock); 118 spin_lock(&titan_irq_lock);
118 titan_cached_irq_mask |= 1UL << (irq - 16); 119 titan_cached_irq_mask |= 1UL << (irq - 16);
119 titan_update_irq_hw(titan_cached_irq_mask); 120 titan_update_irq_hw(titan_cached_irq_mask);
@@ -121,8 +122,9 @@ titan_enable_irq(unsigned int irq)
121} 122}
122 123
123static inline void 124static inline void
124titan_disable_irq(unsigned int irq) 125titan_disable_irq(struct irq_data *d)
125{ 126{
127 unsigned int irq = d->irq;
126 spin_lock(&titan_irq_lock); 128 spin_lock(&titan_irq_lock);
127 titan_cached_irq_mask &= ~(1UL << (irq - 16)); 129 titan_cached_irq_mask &= ~(1UL << (irq - 16));
128 titan_update_irq_hw(titan_cached_irq_mask); 130 titan_update_irq_hw(titan_cached_irq_mask);
@@ -144,8 +146,10 @@ titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
144} 146}
145 147
146static int 148static int
147titan_set_irq_affinity(unsigned int irq, const struct cpumask *affinity) 149titan_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity,
150 bool force)
148{ 151{
152 unsigned int irq = d->irq;
149 spin_lock(&titan_irq_lock); 153 spin_lock(&titan_irq_lock);
150 titan_cpu_set_irq_affinity(irq - 16, *affinity); 154 titan_cpu_set_irq_affinity(irq - 16, *affinity);
151 titan_update_irq_hw(titan_cached_irq_mask); 155 titan_update_irq_hw(titan_cached_irq_mask);
@@ -175,17 +179,17 @@ init_titan_irqs(struct irq_chip * ops, int imin, int imax)
175{ 179{
176 long i; 180 long i;
177 for (i = imin; i <= imax; ++i) { 181 for (i = imin; i <= imax; ++i) {
178 irq_to_desc(i)->status |= IRQ_LEVEL;
179 set_irq_chip_and_handler(i, ops, handle_level_irq); 182 set_irq_chip_and_handler(i, ops, handle_level_irq);
183 irq_set_status_flags(i, IRQ_LEVEL);
180 } 184 }
181} 185}
182 186
183static struct irq_chip titan_irq_type = { 187static struct irq_chip titan_irq_type = {
184 .name = "TITAN", 188 .name = "TITAN",
185 .unmask = titan_enable_irq, 189 .irq_unmask = titan_enable_irq,
186 .mask = titan_disable_irq, 190 .irq_mask = titan_disable_irq,
187 .mask_ack = titan_disable_irq, 191 .irq_mask_ack = titan_disable_irq,
188 .set_affinity = titan_set_irq_affinity, 192 .irq_set_affinity = titan_set_irq_affinity,
189}; 193};
190 194
191static irqreturn_t 195static irqreturn_t
diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c
index eec52594d410..ca60a387ef0a 100644
--- a/arch/alpha/kernel/sys_wildfire.c
+++ b/arch/alpha/kernel/sys_wildfire.c
@@ -104,10 +104,12 @@ wildfire_init_irq_hw(void)
104} 104}
105 105
106static void 106static void
107wildfire_enable_irq(unsigned int irq) 107wildfire_enable_irq(struct irq_data *d)
108{ 108{
109 unsigned int irq = d->irq;
110
109 if (irq < 16) 111 if (irq < 16)
110 i8259a_enable_irq(irq); 112 i8259a_enable_irq(d);
111 113
112 spin_lock(&wildfire_irq_lock); 114 spin_lock(&wildfire_irq_lock);
113 set_bit(irq, &cached_irq_mask); 115 set_bit(irq, &cached_irq_mask);
@@ -116,10 +118,12 @@ wildfire_enable_irq(unsigned int irq)
116} 118}
117 119
118static void 120static void
119wildfire_disable_irq(unsigned int irq) 121wildfire_disable_irq(struct irq_data *d)
120{ 122{
123 unsigned int irq = d->irq;
124
121 if (irq < 16) 125 if (irq < 16)
122 i8259a_disable_irq(irq); 126 i8259a_disable_irq(d);
123 127
124 spin_lock(&wildfire_irq_lock); 128 spin_lock(&wildfire_irq_lock);
125 clear_bit(irq, &cached_irq_mask); 129 clear_bit(irq, &cached_irq_mask);
@@ -128,10 +132,12 @@ wildfire_disable_irq(unsigned int irq)
128} 132}
129 133
130static void 134static void
131wildfire_mask_and_ack_irq(unsigned int irq) 135wildfire_mask_and_ack_irq(struct irq_data *d)
132{ 136{
137 unsigned int irq = d->irq;
138
133 if (irq < 16) 139 if (irq < 16)
134 i8259a_mask_and_ack_irq(irq); 140 i8259a_mask_and_ack_irq(d);
135 141
136 spin_lock(&wildfire_irq_lock); 142 spin_lock(&wildfire_irq_lock);
137 clear_bit(irq, &cached_irq_mask); 143 clear_bit(irq, &cached_irq_mask);
@@ -141,9 +147,9 @@ wildfire_mask_and_ack_irq(unsigned int irq)
141 147
142static struct irq_chip wildfire_irq_type = { 148static struct irq_chip wildfire_irq_type = {
143 .name = "WILDFIRE", 149 .name = "WILDFIRE",
144 .unmask = wildfire_enable_irq, 150 .irq_unmask = wildfire_enable_irq,
145 .mask = wildfire_disable_irq, 151 .irq_mask = wildfire_disable_irq,
146 .mask_ack = wildfire_mask_and_ack_irq, 152 .irq_mask_ack = wildfire_mask_and_ack_irq,
147}; 153};
148 154
149static void __init 155static void __init
@@ -177,21 +183,21 @@ wildfire_init_irq_per_pca(int qbbno, int pcano)
177 for (i = 0; i < 16; ++i) { 183 for (i = 0; i < 16; ++i) {
178 if (i == 2) 184 if (i == 2)
179 continue; 185 continue;
180 irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL;
181 set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type, 186 set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type,
182 handle_level_irq); 187 handle_level_irq);
188 irq_set_status_flags(i + irq_bias, IRQ_LEVEL);
183 } 189 }
184 190
185 irq_to_desc(36+irq_bias)->status |= IRQ_LEVEL;
186 set_irq_chip_and_handler(36+irq_bias, &wildfire_irq_type, 191 set_irq_chip_and_handler(36+irq_bias, &wildfire_irq_type,
187 handle_level_irq); 192 handle_level_irq);
193 irq_set_status_flags(36 + irq_bias, IRQ_LEVEL);
188 for (i = 40; i < 64; ++i) { 194 for (i = 40; i < 64; ++i) {
189 irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL;
190 set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type, 195 set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type,
191 handle_level_irq); 196 handle_level_irq);
197 irq_set_status_flags(i + irq_bias, IRQ_LEVEL);
192 } 198 }
193 199
194 setup_irq(32+irq_bias, &isa_enable); 200 setup_irq(32+irq_bias, &isa_enable);
195} 201}
196 202
197static void __init 203static void __init
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index 778655f0257a..ea5ee4d067f3 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -6,6 +6,8 @@ config ARM_VIC
6 6
7config ARM_VIC_NR 7config ARM_VIC_NR
8 int 8 int
9 default 4 if ARCH_S5PV210
10 default 3 if ARCH_S5P6442 || ARCH_S5PC100
9 default 2 11 default 2
10 depends on ARM_VIC 12 depends on ARM_VIC
11 help 13 help
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 3a0893a76a3b..bf13b814c1b8 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -15,10 +15,6 @@ struct meminfo;
15struct sys_timer; 15struct sys_timer;
16 16
17struct machine_desc { 17struct machine_desc {
18 /*
19 * Note! The first two elements are used
20 * by assembler code in head.S, head-common.S
21 */
22 unsigned int nr; /* architecture number */ 18 unsigned int nr; /* architecture number */
23 const char *name; /* architecture name */ 19 const char *name; /* architecture name */
24 unsigned long boot_params; /* tagged list */ 20 unsigned long boot_params; /* tagged list */
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index 9763be04f77e..22de005f159c 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -10,6 +10,8 @@
10#ifndef _ASMARM_PGALLOC_H 10#ifndef _ASMARM_PGALLOC_H
11#define _ASMARM_PGALLOC_H 11#define _ASMARM_PGALLOC_H
12 12
13#include <linux/pagemap.h>
14
13#include <asm/domain.h> 15#include <asm/domain.h>
14#include <asm/pgtable-hwdef.h> 16#include <asm/pgtable-hwdef.h>
15#include <asm/processor.h> 17#include <asm/processor.h>
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index d600bd350704..44b84fe6e1b0 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -836,9 +836,11 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
836/* 836/*
837 * One-time initialisation. 837 * One-time initialisation.
838 */ 838 */
839static void reset_ctrl_regs(void *unused) 839static void reset_ctrl_regs(void *info)
840{ 840{
841 int i; 841 int i, cpu = smp_processor_id();
842 u32 dbg_power;
843 cpumask_t *cpumask = info;
842 844
843 /* 845 /*
844 * v7 debug contains save and restore registers so that debug state 846 * v7 debug contains save and restore registers so that debug state
@@ -850,6 +852,17 @@ static void reset_ctrl_regs(void *unused)
850 */ 852 */
851 if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) { 853 if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) {
852 /* 854 /*
855 * Ensure sticky power-down is clear (i.e. debug logic is
856 * powered up).
857 */
858 asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power));
859 if ((dbg_power & 0x1) == 0) {
860 pr_warning("CPU %d debug is powered down!\n", cpu);
861 cpumask_or(cpumask, cpumask, cpumask_of(cpu));
862 return;
863 }
864
865 /*
853 * Unconditionally clear the lock by writing a value 866 * Unconditionally clear the lock by writing a value
854 * other than 0xC5ACCE55 to the access register. 867 * other than 0xC5ACCE55 to the access register.
855 */ 868 */
@@ -887,6 +900,7 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = {
887static int __init arch_hw_breakpoint_init(void) 900static int __init arch_hw_breakpoint_init(void)
888{ 901{
889 u32 dscr; 902 u32 dscr;
903 cpumask_t cpumask = { CPU_BITS_NONE };
890 904
891 debug_arch = get_debug_arch(); 905 debug_arch = get_debug_arch();
892 906
@@ -911,7 +925,13 @@ static int __init arch_hw_breakpoint_init(void)
911 * Reset the breakpoint resources. We assume that a halting 925 * Reset the breakpoint resources. We assume that a halting
912 * debugger will leave the world in a nice state for us. 926 * debugger will leave the world in a nice state for us.
913 */ 927 */
914 on_each_cpu(reset_ctrl_regs, NULL, 1); 928 on_each_cpu(reset_ctrl_regs, &cpumask, 1);
929 if (!cpumask_empty(&cpumask)) {
930 core_num_brps = 0;
931 core_num_reserved_brps = 0;
932 core_num_wrps = 0;
933 return 0;
934 }
915 935
916 ARM_DBG_READ(c1, 0, dscr); 936 ARM_DBG_READ(c1, 0, dscr);
917 if (dscr & ARM_DSCR_HDBGEN) { 937 if (dscr & ARM_DSCR_HDBGEN) {
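
The hw_breakpoint change above threads a cpumask through on_each_cpu(): each CPU that finds its debug logic powered down records itself in the mask, and the init path leaves breakpoint support disabled when the mask comes back non-empty. Reduced to a sketch, with a hypothetical example_cpu_resource_ok() standing in for the coprocessor register check, the pattern looks like this:

#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/types.h>

/* Hypothetical per-CPU probe standing in for the debug power-down check. */
static bool example_cpu_resource_ok(void)
{
	return true;
}

static void example_reset_on_cpu(void *info)
{
	cpumask_t *failed = info;

	if (!example_cpu_resource_ok()) {
		/* Record this CPU; the caller inspects the mask afterwards. */
		cpumask_or(failed, failed, cpumask_of(smp_processor_id()));
		return;
	}
	/* Per-CPU reset work would go here. */
}

static int example_init(void)
{
	cpumask_t failed = { CPU_BITS_NONE };

	on_each_cpu(example_reset_on_cpu, &failed, 1);
	if (!cpumask_empty(&failed))
		return 0;	/* leave the feature disabled, as the patch does */

	/* Normal initialisation continues here. */
	return 0;
}
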
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 19c6816db61e..b13e70f63d71 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -996,10 +996,10 @@ static int ptrace_gethbpregs(struct task_struct *tsk, long num,
996 while (!(arch_ctrl.len & 0x1)) 996 while (!(arch_ctrl.len & 0x1))
997 arch_ctrl.len >>= 1; 997 arch_ctrl.len >>= 1;
998 998
999 if (idx & 0x1) 999 if (num & 0x1)
1000 reg = encode_ctrl_reg(arch_ctrl);
1001 else
1002 reg = bp->attr.bp_addr; 1000 reg = bp->attr.bp_addr;
1001 else
1002 reg = encode_ctrl_reg(arch_ctrl);
1003 } 1003 }
1004 1004
1005put: 1005put:
diff --git a/arch/arm/mach-davinci/cpufreq.c b/arch/arm/mach-davinci/cpufreq.c
index 343de73161fa..4a68c2b1ec11 100644
--- a/arch/arm/mach-davinci/cpufreq.c
+++ b/arch/arm/mach-davinci/cpufreq.c
@@ -132,7 +132,7 @@ out:
132 return ret; 132 return ret;
133} 133}
134 134
135static int __init davinci_cpu_init(struct cpufreq_policy *policy) 135static int davinci_cpu_init(struct cpufreq_policy *policy)
136{ 136{
137 int result = 0; 137 int result = 0;
138 struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data; 138 struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index 9eec63070e0c..beda8a4133a0 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -480,8 +480,15 @@ static struct platform_device da850_mcasp_device = {
480 .resource = da850_mcasp_resources, 480 .resource = da850_mcasp_resources,
481}; 481};
482 482
483struct platform_device davinci_pcm_device = {
484 .name = "davinci-pcm-audio",
485 .id = -1,
486};
487
483void __init da8xx_register_mcasp(int id, struct snd_platform_data *pdata) 488void __init da8xx_register_mcasp(int id, struct snd_platform_data *pdata)
484{ 489{
490 platform_device_register(&davinci_pcm_device);
491
485 /* DA830/OMAP-L137 has 3 instances of McASP */ 492 /* DA830/OMAP-L137 has 3 instances of McASP */
486 if (cpu_is_davinci_da830() && id == 1) { 493 if (cpu_is_davinci_da830() && id == 1) {
487 da830_mcasp1_device.dev.platform_data = pdata; 494 da830_mcasp1_device.dev.platform_data = pdata;
diff --git a/arch/arm/mach-davinci/gpio-tnetv107x.c b/arch/arm/mach-davinci/gpio-tnetv107x.c
index d10298620e2c..3fa3e2867e19 100644
--- a/arch/arm/mach-davinci/gpio-tnetv107x.c
+++ b/arch/arm/mach-davinci/gpio-tnetv107x.c
@@ -58,7 +58,7 @@ static int tnetv107x_gpio_request(struct gpio_chip *chip, unsigned offset)
58 58
59 spin_lock_irqsave(&ctlr->lock, flags); 59 spin_lock_irqsave(&ctlr->lock, flags);
60 60
61 gpio_reg_set_bit(&regs->enable, gpio); 61 gpio_reg_set_bit(regs->enable, gpio);
62 62
63 spin_unlock_irqrestore(&ctlr->lock, flags); 63 spin_unlock_irqrestore(&ctlr->lock, flags);
64 64
@@ -74,7 +74,7 @@ static void tnetv107x_gpio_free(struct gpio_chip *chip, unsigned offset)
74 74
75 spin_lock_irqsave(&ctlr->lock, flags); 75 spin_lock_irqsave(&ctlr->lock, flags);
76 76
77 gpio_reg_clear_bit(&regs->enable, gpio); 77 gpio_reg_clear_bit(regs->enable, gpio);
78 78
79 spin_unlock_irqrestore(&ctlr->lock, flags); 79 spin_unlock_irqrestore(&ctlr->lock, flags);
80} 80}
@@ -88,7 +88,7 @@ static int tnetv107x_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
88 88
89 spin_lock_irqsave(&ctlr->lock, flags); 89 spin_lock_irqsave(&ctlr->lock, flags);
90 90
91 gpio_reg_set_bit(&regs->direction, gpio); 91 gpio_reg_set_bit(regs->direction, gpio);
92 92
93 spin_unlock_irqrestore(&ctlr->lock, flags); 93 spin_unlock_irqrestore(&ctlr->lock, flags);
94 94
@@ -106,11 +106,11 @@ static int tnetv107x_gpio_dir_out(struct gpio_chip *chip,
106 spin_lock_irqsave(&ctlr->lock, flags); 106 spin_lock_irqsave(&ctlr->lock, flags);
107 107
108 if (value) 108 if (value)
109 gpio_reg_set_bit(&regs->data_out, gpio); 109 gpio_reg_set_bit(regs->data_out, gpio);
110 else 110 else
111 gpio_reg_clear_bit(&regs->data_out, gpio); 111 gpio_reg_clear_bit(regs->data_out, gpio);
112 112
113 gpio_reg_clear_bit(&regs->direction, gpio); 113 gpio_reg_clear_bit(regs->direction, gpio);
114 114
115 spin_unlock_irqrestore(&ctlr->lock, flags); 115 spin_unlock_irqrestore(&ctlr->lock, flags);
116 116
@@ -124,7 +124,7 @@ static int tnetv107x_gpio_get(struct gpio_chip *chip, unsigned offset)
124 unsigned gpio = chip->base + offset; 124 unsigned gpio = chip->base + offset;
125 int ret; 125 int ret;
126 126
127 ret = gpio_reg_get_bit(&regs->data_in, gpio); 127 ret = gpio_reg_get_bit(regs->data_in, gpio);
128 128
129 return ret ? 1 : 0; 129 return ret ? 1 : 0;
130} 130}
@@ -140,9 +140,9 @@ static void tnetv107x_gpio_set(struct gpio_chip *chip,
140 spin_lock_irqsave(&ctlr->lock, flags); 140 spin_lock_irqsave(&ctlr->lock, flags);
141 141
142 if (value) 142 if (value)
143 gpio_reg_set_bit(&regs->data_out, gpio); 143 gpio_reg_set_bit(regs->data_out, gpio);
144 else 144 else
145 gpio_reg_clear_bit(&regs->data_out, gpio); 145 gpio_reg_clear_bit(regs->data_out, gpio);
146 146
147 spin_unlock_irqrestore(&ctlr->lock, flags); 147 spin_unlock_irqrestore(&ctlr->lock, flags);
148} 148}
diff --git a/arch/arm/mach-davinci/include/mach/clkdev.h b/arch/arm/mach-davinci/include/mach/clkdev.h
index 730c49d1ebd8..14a504887189 100644
--- a/arch/arm/mach-davinci/include/mach/clkdev.h
+++ b/arch/arm/mach-davinci/include/mach/clkdev.h
@@ -1,6 +1,8 @@
1#ifndef __MACH_CLKDEV_H 1#ifndef __MACH_CLKDEV_H
2#define __MACH_CLKDEV_H 2#define __MACH_CLKDEV_H
3 3
4struct clk;
5
4static inline int __clk_get(struct clk *clk) 6static inline int __clk_get(struct clk *clk)
5{ 7{
6 return 1; 8 return 1;
diff --git a/arch/arm/mach-omap2/mailbox.c b/arch/arm/mach-omap2/mailbox.c
index 0a585dfa9874..24b88504df0f 100644
--- a/arch/arm/mach-omap2/mailbox.c
+++ b/arch/arm/mach-omap2/mailbox.c
@@ -193,10 +193,12 @@ static void omap2_mbox_disable_irq(struct omap_mbox *mbox,
193 omap_mbox_type_t irq) 193 omap_mbox_type_t irq)
194{ 194{
195 struct omap_mbox2_priv *p = mbox->priv; 195 struct omap_mbox2_priv *p = mbox->priv;
196 u32 l, bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit; 196 u32 bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
197 l = mbox_read_reg(p->irqdisable); 197
198 l &= ~bit; 198 if (!cpu_is_omap44xx())
199 mbox_write_reg(l, p->irqdisable); 199 bit = mbox_read_reg(p->irqdisable) & ~bit;
200
201 mbox_write_reg(bit, p->irqdisable);
200} 202}
201 203
202static void omap2_mbox_ack_irq(struct omap_mbox *mbox, 204static void omap2_mbox_ack_irq(struct omap_mbox *mbox,
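
The mailbox disable path above handles two register layouts: OMAP4 provides a dedicated irq-disable register where writing a bit disables that source, while earlier parts only expose an enable mask that has to be read, cleared and written back. A small sketch of the two styles, using hypothetical example_read()/example_write() accessors and register indices rather than the driver's own helpers:

#include <linux/types.h>

/* Hypothetical register file and accessors, for illustration only. */
static u32 example_regs[4];
#define EXAMPLE_IRQENABLE	0
#define EXAMPLE_IRQDISABLE	1

static u32 example_read(int idx)		{ return example_regs[idx]; }
static void example_write(u32 val, int idx)	{ example_regs[idx] = val; }

static void example_disable_source(u32 bit, bool has_disable_reg)
{
	if (has_disable_reg) {
		/* Write-one-to-disable register: writing the bit is enough. */
		example_write(bit, EXAMPLE_IRQDISABLE);
	} else {
		/* Plain enable mask: read-modify-write to clear the bit. */
		example_write(example_read(EXAMPLE_IRQENABLE) & ~bit,
			      EXAMPLE_IRQENABLE);
	}
}
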
diff --git a/arch/arm/mach-omap2/smartreflex.c b/arch/arm/mach-omap2/smartreflex.c
index 95ac336fe3f7..1a777e34d0c2 100644
--- a/arch/arm/mach-omap2/smartreflex.c
+++ b/arch/arm/mach-omap2/smartreflex.c
@@ -282,6 +282,7 @@ error:
282 dev_err(&sr_info->pdev->dev, "%s: ERROR in registering" 282 dev_err(&sr_info->pdev->dev, "%s: ERROR in registering"
283 "interrupt handler. Smartreflex will" 283 "interrupt handler. Smartreflex will"
284 "not function as desired\n", __func__); 284 "not function as desired\n", __func__);
285 kfree(name);
285 kfree(sr_info); 286 kfree(sr_info);
286 return ret; 287 return ret;
287} 288}
@@ -879,7 +880,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
879 ret = sr_late_init(sr_info); 880 ret = sr_late_init(sr_info);
880 if (ret) { 881 if (ret) {
881 pr_warning("%s: Error in SR late init\n", __func__); 882 pr_warning("%s: Error in SR late init\n", __func__);
882 return ret; 883 goto err_release_region;
883 } 884 }
884 } 885 }
885 886
@@ -890,14 +891,17 @@ static int __init omap_sr_probe(struct platform_device *pdev)
890 * not try to create rest of the debugfs entries. 891 * not try to create rest of the debugfs entries.
891 */ 892 */
892 vdd_dbg_dir = omap_voltage_get_dbgdir(sr_info->voltdm); 893 vdd_dbg_dir = omap_voltage_get_dbgdir(sr_info->voltdm);
893 if (!vdd_dbg_dir) 894 if (!vdd_dbg_dir) {
894 return -EINVAL; 895 ret = -EINVAL;
896 goto err_release_region;
897 }
895 898
896 dbg_dir = debugfs_create_dir("smartreflex", vdd_dbg_dir); 899 dbg_dir = debugfs_create_dir("smartreflex", vdd_dbg_dir);
897 if (IS_ERR(dbg_dir)) { 900 if (IS_ERR(dbg_dir)) {
898 dev_err(&pdev->dev, "%s: Unable to create debugfs directory\n", 901 dev_err(&pdev->dev, "%s: Unable to create debugfs directory\n",
899 __func__); 902 __func__);
900 return PTR_ERR(dbg_dir); 903 ret = PTR_ERR(dbg_dir);
904 goto err_release_region;
901 } 905 }
902 906
903 (void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, dbg_dir, 907 (void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, dbg_dir,
@@ -913,7 +917,8 @@ static int __init omap_sr_probe(struct platform_device *pdev)
913 if (IS_ERR(nvalue_dir)) { 917 if (IS_ERR(nvalue_dir)) {
914 dev_err(&pdev->dev, "%s: Unable to create debugfs directory" 918 dev_err(&pdev->dev, "%s: Unable to create debugfs directory"
915 "for n-values\n", __func__); 919 "for n-values\n", __func__);
916 return PTR_ERR(nvalue_dir); 920 ret = PTR_ERR(nvalue_dir);
921 goto err_release_region;
917 } 922 }
918 923
919 omap_voltage_get_volttable(sr_info->voltdm, &volt_data); 924 omap_voltage_get_volttable(sr_info->voltdm, &volt_data);
@@ -922,23 +927,15 @@ static int __init omap_sr_probe(struct platform_device *pdev)
922 " corresponding vdd vdd_%s. Cannot create debugfs" 927 " corresponding vdd vdd_%s. Cannot create debugfs"
923 "entries for n-values\n", 928 "entries for n-values\n",
924 __func__, sr_info->voltdm->name); 929 __func__, sr_info->voltdm->name);
925 return -ENODATA; 930 ret = -ENODATA;
931 goto err_release_region;
926 } 932 }
927 933
928 for (i = 0; i < sr_info->nvalue_count; i++) { 934 for (i = 0; i < sr_info->nvalue_count; i++) {
929 char *name; 935 char name[NVALUE_NAME_LEN + 1];
930 char volt_name[32];
931
932 name = kzalloc(NVALUE_NAME_LEN + 1, GFP_KERNEL);
933 if (!name) {
934 dev_err(&pdev->dev, "%s: Unable to allocate memory"
935 " for n-value directory name\n", __func__);
936 return -ENOMEM;
937 }
938 936
939 strcpy(name, "volt_"); 937 snprintf(name, sizeof(name), "volt_%d",
940 sprintf(volt_name, "%d", volt_data[i].volt_nominal); 938 volt_data[i].volt_nominal);
941 strcat(name, volt_name);
942 (void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir, 939 (void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir,
943 &(sr_info->nvalue_table[i].nvalue)); 940 &(sr_info->nvalue_table[i].nvalue));
944 } 941 }
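
The debugfs naming loop in the smartreflex probe drops the per-entry kzalloc()/strcpy()/sprintf()/strcat() sequence in favour of a fixed stack buffer filled by one bounded snprintf(), which also removes an allocation that was never freed. A standalone sketch of the same idea; the buffer length used here is an assumption, not the driver's NVALUE_NAME_LEN:

#include <stdio.h>

#define EXAMPLE_NAME_LEN 40	/* assumed buffer length for the sketch */

int main(void)
{
	char name[EXAMPLE_NAME_LEN + 1];
	unsigned int volt_nominal = 1200000;	/* sample value */

	/* One bounded formatting call replaces alloc + strcpy + strcat. */
	snprintf(name, sizeof(name), "volt_%u", volt_nominal);
	printf("%s\n", name);	/* prints: volt_1200000 */

	return 0;
}
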
diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c
index fbc5b775f895..b166b1d845d7 100644
--- a/arch/arm/mach-pxa/pxa25x.c
+++ b/arch/arm/mach-pxa/pxa25x.c
@@ -347,6 +347,7 @@ static struct platform_device *pxa25x_devices[] __initdata = {
347 &pxa25x_device_assp, 347 &pxa25x_device_assp,
348 &pxa25x_device_pwm0, 348 &pxa25x_device_pwm0,
349 &pxa25x_device_pwm1, 349 &pxa25x_device_pwm1,
350 &pxa_device_asoc_platform,
350}; 351};
351 352
352static struct sys_device pxa25x_sysdev[] = { 353static struct sys_device pxa25x_sysdev[] = {
diff --git a/arch/arm/mach-pxa/tosa-bt.c b/arch/arm/mach-pxa/tosa-bt.c
index c31e601eb49c..b9b1e5c2b290 100644
--- a/arch/arm/mach-pxa/tosa-bt.c
+++ b/arch/arm/mach-pxa/tosa-bt.c
@@ -81,8 +81,6 @@ static int tosa_bt_probe(struct platform_device *dev)
81 goto err_rfk_alloc; 81 goto err_rfk_alloc;
82 } 82 }
83 83
84 rfkill_set_led_trigger_name(rfk, "tosa-bt");
85
86 rc = rfkill_register(rfk); 84 rc = rfkill_register(rfk);
87 if (rc) 85 if (rc)
88 goto err_rfkill; 86 goto err_rfkill;
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
index af152e70cfcf..f2582ec300d9 100644
--- a/arch/arm/mach-pxa/tosa.c
+++ b/arch/arm/mach-pxa/tosa.c
@@ -875,6 +875,11 @@ static struct platform_device sharpsl_rom_device = {
875 .dev.platform_data = &sharpsl_rom_data, 875 .dev.platform_data = &sharpsl_rom_data,
876}; 876};
877 877
878static struct platform_device wm9712_device = {
879 .name = "wm9712-codec",
880 .id = -1,
881};
882
878static struct platform_device *devices[] __initdata = { 883static struct platform_device *devices[] __initdata = {
879 &tosascoop_device, 884 &tosascoop_device,
880 &tosascoop_jc_device, 885 &tosascoop_jc_device,
@@ -885,6 +890,7 @@ static struct platform_device *devices[] __initdata = {
885 &tosaled_device, 890 &tosaled_device,
886 &tosa_bt_device, 891 &tosa_bt_device,
887 &sharpsl_rom_device, 892 &sharpsl_rom_device,
893 &wm9712_device,
888}; 894};
889 895
890static void tosa_poweroff(void) 896static void tosa_poweroff(void)
diff --git a/arch/arm/mach-s3c2440/Kconfig b/arch/arm/mach-s3c2440/Kconfig
index a0cb2581894f..50825a3f91cc 100644
--- a/arch/arm/mach-s3c2440/Kconfig
+++ b/arch/arm/mach-s3c2440/Kconfig
@@ -99,6 +99,7 @@ config MACH_NEO1973_GTA02
99 select POWER_SUPPLY 99 select POWER_SUPPLY
100 select MACH_NEO1973 100 select MACH_NEO1973
101 select S3C2410_PWM 101 select S3C2410_PWM
102 select S3C_DEV_USB_HOST
102 help 103 help
103 Say Y here if you are using the Openmoko GTA02 / Freerunner GSM Phone 104 Say Y here if you are using the Openmoko GTA02 / Freerunner GSM Phone
104 105
diff --git a/arch/arm/mach-s3c2440/include/mach/gta02.h b/arch/arm/mach-s3c2440/include/mach/gta02.h
index 953331d8d56a..3a56a229cac6 100644
--- a/arch/arm/mach-s3c2440/include/mach/gta02.h
+++ b/arch/arm/mach-s3c2440/include/mach/gta02.h
@@ -44,19 +44,19 @@
44#define GTA02v3_GPIO_nUSB_FLT S3C2410_GPG(10) /* v3 + v4 only */ 44#define GTA02v3_GPIO_nUSB_FLT S3C2410_GPG(10) /* v3 + v4 only */
45#define GTA02v3_GPIO_nGSM_OC S3C2410_GPG(11) /* v3 + v4 only */ 45#define GTA02v3_GPIO_nGSM_OC S3C2410_GPG(11) /* v3 + v4 only */
46 46
47#define GTA02_GPIO_AMP_SHUT S3C2440_GPJ1 /* v2 + v3 + v4 only */ 47#define GTA02_GPIO_AMP_SHUT S3C2410_GPJ(1) /* v2 + v3 + v4 only */
48#define GTA02v1_GPIO_WLAN_GPIO10 S3C2440_GPJ2 48#define GTA02v1_GPIO_WLAN_GPIO10 S3C2410_GPJ(2)
49#define GTA02_GPIO_HP_IN S3C2440_GPJ2 /* v2 + v3 + v4 only */ 49#define GTA02_GPIO_HP_IN S3C2410_GPJ(2) /* v2 + v3 + v4 only */
50#define GTA02_GPIO_INT0 S3C2440_GPJ3 /* v2 + v3 + v4 only */ 50#define GTA02_GPIO_INT0 S3C2410_GPJ(3) /* v2 + v3 + v4 only */
51#define GTA02_GPIO_nGSM_EN S3C2440_GPJ4 51#define GTA02_GPIO_nGSM_EN S3C2410_GPJ(4)
52#define GTA02_GPIO_3D_RESET S3C2440_GPJ5 52#define GTA02_GPIO_3D_RESET S3C2410_GPJ(5)
53#define GTA02_GPIO_nDL_GSM S3C2440_GPJ6 /* v4 + v5 only */ 53#define GTA02_GPIO_nDL_GSM S3C2410_GPJ(6) /* v4 + v5 only */
54#define GTA02_GPIO_WLAN_GPIO0 S3C2440_GPJ7 54#define GTA02_GPIO_WLAN_GPIO0 S3C2410_GPJ(7)
55#define GTA02v1_GPIO_BAT_ID S3C2440_GPJ8 55#define GTA02v1_GPIO_BAT_ID S3C2410_GPJ(8)
56#define GTA02_GPIO_KEEPACT S3C2440_GPJ8 56#define GTA02_GPIO_KEEPACT S3C2410_GPJ(8)
57#define GTA02v1_GPIO_HP_IN S3C2440_GPJ10 57#define GTA02v1_GPIO_HP_IN S3C2410_GPJ(10)
58#define GTA02_CHIP_PWD S3C2440_GPJ11 /* v2 + v3 + v4 only */ 58#define GTA02_CHIP_PWD S3C2410_GPJ(11) /* v2 + v3 + v4 only */
59#define GTA02_GPIO_nWLAN_RESET S3C2440_GPJ12 /* v2 + v3 + v4 only */ 59#define GTA02_GPIO_nWLAN_RESET S3C2410_GPJ(12) /* v2 + v3 + v4 only */
60 60
61#define GTA02_IRQ_GSENSOR_1 IRQ_EINT0 61#define GTA02_IRQ_GSENSOR_1 IRQ_EINT0
62#define GTA02_IRQ_MODEM IRQ_EINT1 62#define GTA02_IRQ_MODEM IRQ_EINT1
diff --git a/arch/arm/mach-s3c64xx/clock.c b/arch/arm/mach-s3c64xx/clock.c
index dd3782064508..fdfc4d5e37a1 100644
--- a/arch/arm/mach-s3c64xx/clock.c
+++ b/arch/arm/mach-s3c64xx/clock.c
@@ -151,6 +151,12 @@ static struct clk init_clocks_off[] = {
151 .enable = s3c64xx_pclk_ctrl, 151 .enable = s3c64xx_pclk_ctrl,
152 .ctrlbit = S3C_CLKCON_PCLK_IIC, 152 .ctrlbit = S3C_CLKCON_PCLK_IIC,
153 }, { 153 }, {
154 .name = "i2c",
155 .id = 1,
156 .parent = &clk_p,
157 .enable = s3c64xx_pclk_ctrl,
158 .ctrlbit = S3C6410_CLKCON_PCLK_I2C1,
159 }, {
154 .name = "iis", 160 .name = "iis",
155 .id = 0, 161 .id = 0,
156 .parent = &clk_p, 162 .parent = &clk_p,
diff --git a/arch/arm/mach-s3c64xx/dma.c b/arch/arm/mach-s3c64xx/dma.c
index 135db1b41252..c35585cf8c4f 100644
--- a/arch/arm/mach-s3c64xx/dma.c
+++ b/arch/arm/mach-s3c64xx/dma.c
@@ -690,12 +690,12 @@ static int s3c64xx_dma_init1(int chno, enum dma_ch chbase,
690 690
691 regptr = regs + PL080_Cx_BASE(0); 691 regptr = regs + PL080_Cx_BASE(0);
692 692
693 for (ch = 0; ch < 8; ch++, chno++, chptr++) { 693 for (ch = 0; ch < 8; ch++, chptr++) {
694 printk(KERN_INFO "%s: registering DMA %d (%p)\n", 694 pr_debug("%s: registering DMA %d (%p)\n",
695 __func__, chno, regptr); 695 __func__, chno + ch, regptr);
696 696
697 chptr->bit = 1 << ch; 697 chptr->bit = 1 << ch;
698 chptr->number = chno; 698 chptr->number = chno + ch;
699 chptr->dmac = dmac; 699 chptr->dmac = dmac;
700 chptr->regs = regptr; 700 chptr->regs = regptr;
701 regptr += PL080_Cx_STRIDE; 701 regptr += PL080_Cx_STRIDE;
@@ -704,7 +704,8 @@ static int s3c64xx_dma_init1(int chno, enum dma_ch chbase,
704 /* for the moment, permanently enable the controller */ 704 /* for the moment, permanently enable the controller */
705 writel(PL080_CONFIG_ENABLE, regs + PL080_CONFIG); 705 writel(PL080_CONFIG_ENABLE, regs + PL080_CONFIG);
706 706
707 printk(KERN_INFO "PL080: IRQ %d, at %p\n", irq, regs); 707 printk(KERN_INFO "PL080: IRQ %d, at %p, channels %d..%d\n",
708 irq, regs, chno, chno+8);
708 709
709 return 0; 710 return 0;
710 711
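
The PL080 registration loop above stops incrementing chno inside the loop and derives each channel number as chno + ch, so the base value is still intact for the banner printed after the loop. The general point, sketched with hypothetical names: compute per-iteration values from base plus index instead of mutating the base.

#include <stdio.h>

struct example_chan {
	int number;
};

/* Derive per-channel numbers from base + index; never mutate the base. */
static void example_register_block(struct example_chan *ch, int base, int count)
{
	int i;

	for (i = 0; i < count; i++)
		ch[i].number = base + i;

	/* base is untouched, so the banner reports the real range. */
	printf("channels %d..%d\n", base, base + count - 1);
}

int main(void)
{
	struct example_chan chans[8];

	example_register_block(chans, 16, 8);	/* prints: channels 16..23 */
	return 0;
}
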
diff --git a/arch/arm/mach-s3c64xx/gpiolib.c b/arch/arm/mach-s3c64xx/gpiolib.c
index fd99a82e82c4..92b09085caaa 100644
--- a/arch/arm/mach-s3c64xx/gpiolib.c
+++ b/arch/arm/mach-s3c64xx/gpiolib.c
@@ -72,7 +72,7 @@ static struct s3c_gpio_cfg gpio_4bit_cfg_eint0011 = {
72 .get_pull = s3c_gpio_getpull_updown, 72 .get_pull = s3c_gpio_getpull_updown,
73}; 73};
74 74
75int s3c64xx_gpio2int_gpm(struct gpio_chip *chip, unsigned pin) 75static int s3c64xx_gpio2int_gpm(struct gpio_chip *chip, unsigned pin)
76{ 76{
77 return pin < 5 ? IRQ_EINT(23) + pin : -ENXIO; 77 return pin < 5 ? IRQ_EINT(23) + pin : -ENXIO;
78} 78}
@@ -138,7 +138,7 @@ static struct s3c_gpio_chip gpio_4bit[] = {
138 }, 138 },
139}; 139};
140 140
141int s3c64xx_gpio2int_gpl(struct gpio_chip *chip, unsigned pin) 141static int s3c64xx_gpio2int_gpl(struct gpio_chip *chip, unsigned pin)
142{ 142{
143 return pin >= 8 ? IRQ_EINT(16) + pin - 8 : -ENXIO; 143 return pin >= 8 ? IRQ_EINT(16) + pin - 8 : -ENXIO;
144} 144}
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c
index e85192a86fbe..a80a3163dd30 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6410.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c
@@ -28,6 +28,7 @@
28#include <linux/delay.h> 28#include <linux/delay.h>
29#include <linux/smsc911x.h> 29#include <linux/smsc911x.h>
30#include <linux/regulator/fixed.h> 30#include <linux/regulator/fixed.h>
31#include <linux/regulator/machine.h>
31 32
32#ifdef CONFIG_SMDK6410_WM1190_EV1 33#ifdef CONFIG_SMDK6410_WM1190_EV1
33#include <linux/mfd/wm8350/core.h> 34#include <linux/mfd/wm8350/core.h>
@@ -351,7 +352,7 @@ static struct regulator_init_data smdk6410_vddpll = {
351/* VDD_UH_MMC, LDO5 on J5 */ 352/* VDD_UH_MMC, LDO5 on J5 */
352static struct regulator_init_data smdk6410_vdduh_mmc = { 353static struct regulator_init_data smdk6410_vdduh_mmc = {
353 .constraints = { 354 .constraints = {
354 .name = "PVDD_UH/PVDD_MMC", 355 .name = "PVDD_UH+PVDD_MMC",
355 .always_on = 1, 356 .always_on = 1,
356 }, 357 },
357}; 358};
@@ -417,7 +418,7 @@ static struct regulator_init_data smdk6410_vddaudio = {
417/* S3C64xx internal logic & PLL */ 418/* S3C64xx internal logic & PLL */
418static struct regulator_init_data wm8350_dcdc1_data = { 419static struct regulator_init_data wm8350_dcdc1_data = {
419 .constraints = { 420 .constraints = {
420 .name = "PVDD_INT/PVDD_PLL", 421 .name = "PVDD_INT+PVDD_PLL",
421 .min_uV = 1200000, 422 .min_uV = 1200000,
422 .max_uV = 1200000, 423 .max_uV = 1200000,
423 .always_on = 1, 424 .always_on = 1,
@@ -452,7 +453,7 @@ static struct regulator_consumer_supply wm8350_dcdc4_consumers[] = {
452 453
453static struct regulator_init_data wm8350_dcdc4_data = { 454static struct regulator_init_data wm8350_dcdc4_data = {
454 .constraints = { 455 .constraints = {
455 .name = "PVDD_HI/PVDD_EXT/PVDD_SYS/PVCCM2MTV", 456 .name = "PVDD_HI+PVDD_EXT+PVDD_SYS+PVCCM2MTV",
456 .min_uV = 3000000, 457 .min_uV = 3000000,
457 .max_uV = 3000000, 458 .max_uV = 3000000,
458 .always_on = 1, 459 .always_on = 1,
@@ -464,7 +465,7 @@ static struct regulator_init_data wm8350_dcdc4_data = {
464/* OTGi/1190-EV1 HPVDD & AVDD */ 465/* OTGi/1190-EV1 HPVDD & AVDD */
465static struct regulator_init_data wm8350_ldo4_data = { 466static struct regulator_init_data wm8350_ldo4_data = {
466 .constraints = { 467 .constraints = {
467 .name = "PVDD_OTGI/HPVDD/AVDD", 468 .name = "PVDD_OTGI+HPVDD+AVDD",
468 .min_uV = 1200000, 469 .min_uV = 1200000,
469 .max_uV = 1200000, 470 .max_uV = 1200000,
470 .apply_uV = 1, 471 .apply_uV = 1,
@@ -552,7 +553,7 @@ static struct wm831x_backlight_pdata wm1192_backlight_pdata = {
552 553
553static struct regulator_init_data wm1192_dcdc3 = { 554static struct regulator_init_data wm1192_dcdc3 = {
554 .constraints = { 555 .constraints = {
555 .name = "PVDD_MEM/PVDD_GPS", 556 .name = "PVDD_MEM+PVDD_GPS",
556 .always_on = 1, 557 .always_on = 1,
557 }, 558 },
558}; 559};
@@ -563,7 +564,7 @@ static struct regulator_consumer_supply wm1192_ldo1_consumers[] = {
563 564
564static struct regulator_init_data wm1192_ldo1 = { 565static struct regulator_init_data wm1192_ldo1 = {
565 .constraints = { 566 .constraints = {
566 .name = "PVDD_LCD/PVDD_EXT", 567 .name = "PVDD_LCD+PVDD_EXT",
567 .always_on = 1, 568 .always_on = 1,
568 }, 569 },
569 .consumer_supplies = wm1192_ldo1_consumers, 570 .consumer_supplies = wm1192_ldo1_consumers,
diff --git a/arch/arm/mach-s3c64xx/setup-keypad.c b/arch/arm/mach-s3c64xx/setup-keypad.c
index f8ed0d22db70..1d4d0ee9e870 100644
--- a/arch/arm/mach-s3c64xx/setup-keypad.c
+++ b/arch/arm/mach-s3c64xx/setup-keypad.c
@@ -17,7 +17,7 @@
17void samsung_keypad_cfg_gpio(unsigned int rows, unsigned int cols) 17void samsung_keypad_cfg_gpio(unsigned int rows, unsigned int cols)
18{ 18{
19 /* Set all the necessary GPK pins to special-function 3: KP_ROW[x] */ 19 /* Set all the necessary GPK pins to special-function 3: KP_ROW[x] */
20 s3c_gpio_cfgrange_nopull(S3C64XX_GPK(8), 8 + rows, S3C_GPIO_SFN(3)); 20 s3c_gpio_cfgrange_nopull(S3C64XX_GPK(8), rows, S3C_GPIO_SFN(3));
21 21
22 /* Set all the necessary GPL pins to special-function 3: KP_COL[x] */ 22 /* Set all the necessary GPL pins to special-function 3: KP_COL[x] */
23 s3c_gpio_cfgrange_nopull(S3C64XX_GPL(0), cols, S3C_GPIO_SFN(3)); 23 s3c_gpio_cfgrange_nopull(S3C64XX_GPL(0), cols, S3C_GPIO_SFN(3));
diff --git a/arch/arm/mach-s3c64xx/setup-sdhci.c b/arch/arm/mach-s3c64xx/setup-sdhci.c
index 1a942037c4ef..f344a222bc84 100644
--- a/arch/arm/mach-s3c64xx/setup-sdhci.c
+++ b/arch/arm/mach-s3c64xx/setup-sdhci.c
@@ -56,7 +56,7 @@ void s3c6400_setup_sdhci_cfg_card(struct platform_device *dev,
56 else 56 else
57 ctrl3 = (S3C_SDHCI_CTRL3_FCSEL1 | S3C_SDHCI_CTRL3_FCSEL0); 57 ctrl3 = (S3C_SDHCI_CTRL3_FCSEL1 | S3C_SDHCI_CTRL3_FCSEL0);
58 58
59 printk(KERN_INFO "%s: CTRL 2=%08x, 3=%08x\n", __func__, ctrl2, ctrl3); 59 pr_debug("%s: CTRL 2=%08x, 3=%08x\n", __func__, ctrl2, ctrl3);
60 writel(ctrl2, r + S3C_SDHCI_CONTROL2); 60 writel(ctrl2, r + S3C_SDHCI_CONTROL2);
61 writel(ctrl3, r + S3C_SDHCI_CONTROL3); 61 writel(ctrl3, r + S3C_SDHCI_CONTROL3);
62} 62}
diff --git a/arch/arm/mach-s5p64x0/include/mach/gpio.h b/arch/arm/mach-s5p64x0/include/mach/gpio.h
index 5486c8f01f1d..adb5f298ead8 100644
--- a/arch/arm/mach-s5p64x0/include/mach/gpio.h
+++ b/arch/arm/mach-s5p64x0/include/mach/gpio.h
@@ -23,7 +23,7 @@
23#define S5P6440_GPIO_A_NR (6) 23#define S5P6440_GPIO_A_NR (6)
24#define S5P6440_GPIO_B_NR (7) 24#define S5P6440_GPIO_B_NR (7)
25#define S5P6440_GPIO_C_NR (8) 25#define S5P6440_GPIO_C_NR (8)
26#define S5P6440_GPIO_F_NR (2) 26#define S5P6440_GPIO_F_NR (16)
27#define S5P6440_GPIO_G_NR (7) 27#define S5P6440_GPIO_G_NR (7)
28#define S5P6440_GPIO_H_NR (10) 28#define S5P6440_GPIO_H_NR (10)
29#define S5P6440_GPIO_I_NR (16) 29#define S5P6440_GPIO_I_NR (16)
@@ -36,7 +36,7 @@
36#define S5P6450_GPIO_B_NR (7) 36#define S5P6450_GPIO_B_NR (7)
37#define S5P6450_GPIO_C_NR (8) 37#define S5P6450_GPIO_C_NR (8)
38#define S5P6450_GPIO_D_NR (8) 38#define S5P6450_GPIO_D_NR (8)
39#define S5P6450_GPIO_F_NR (2) 39#define S5P6450_GPIO_F_NR (16)
40#define S5P6450_GPIO_G_NR (14) 40#define S5P6450_GPIO_G_NR (14)
41#define S5P6450_GPIO_H_NR (10) 41#define S5P6450_GPIO_H_NR (10)
42#define S5P6450_GPIO_I_NR (16) 42#define S5P6450_GPIO_I_NR (16)
diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c
index 2123b96b5638..4303a86e6e38 100644
--- a/arch/arm/mach-shmobile/board-ag5evm.c
+++ b/arch/arm/mach-shmobile/board-ag5evm.c
@@ -454,6 +454,7 @@ static void __init ag5evm_init(void)
454 gpio_direction_output(GPIO_PORT217, 0); 454 gpio_direction_output(GPIO_PORT217, 0);
455 mdelay(1); 455 mdelay(1);
456 gpio_set_value(GPIO_PORT217, 1); 456 gpio_set_value(GPIO_PORT217, 1);
457 mdelay(100);
457 458
458 /* LCD backlight controller */ 459 /* LCD backlight controller */
459 gpio_request(GPIO_PORT235, NULL); /* RESET */ 460 gpio_request(GPIO_PORT235, NULL); /* RESET */
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index 3cf0951caa2d..81d6536552a9 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -1303,7 +1303,7 @@ static void __init ap4evb_init(void)
1303 1303
1304 lcdc_info.clock_source = LCDC_CLK_BUS; 1304 lcdc_info.clock_source = LCDC_CLK_BUS;
1305 lcdc_info.ch[0].interface_type = RGB18; 1305 lcdc_info.ch[0].interface_type = RGB18;
1306 lcdc_info.ch[0].clock_divider = 2; 1306 lcdc_info.ch[0].clock_divider = 3;
1307 lcdc_info.ch[0].flags = 0; 1307 lcdc_info.ch[0].flags = 0;
1308 lcdc_info.ch[0].lcd_size_cfg.width = 152; 1308 lcdc_info.ch[0].lcd_size_cfg.width = 152;
1309 lcdc_info.ch[0].lcd_size_cfg.height = 91; 1309 lcdc_info.ch[0].lcd_size_cfg.height = 91;
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index fb4213a4e15a..1657eac5dde2 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -303,7 +303,7 @@ static struct sh_mobile_lcdc_info lcdc_info = {
303 .lcd_cfg = mackerel_lcdc_modes, 303 .lcd_cfg = mackerel_lcdc_modes,
304 .num_cfg = ARRAY_SIZE(mackerel_lcdc_modes), 304 .num_cfg = ARRAY_SIZE(mackerel_lcdc_modes),
305 .interface_type = RGB24, 305 .interface_type = RGB24,
306 .clock_divider = 2, 306 .clock_divider = 3,
307 .flags = 0, 307 .flags = 0,
308 .lcd_size_cfg.width = 152, 308 .lcd_size_cfg.width = 152,
309 .lcd_size_cfg.height = 91, 309 .lcd_size_cfg.height = 91,
diff --git a/arch/arm/mach-shmobile/clock-sh73a0.c b/arch/arm/mach-shmobile/clock-sh73a0.c
index ddd4a1b775f0..7e58904c1c8c 100644
--- a/arch/arm/mach-shmobile/clock-sh73a0.c
+++ b/arch/arm/mach-shmobile/clock-sh73a0.c
@@ -263,7 +263,7 @@ static struct clk div6_clks[DIV6_NR] = {
263}; 263};
264 264
265enum { MSTP001, 265enum { MSTP001,
266 MSTP125, MSTP118, MSTP116, MSTP100, 266 MSTP129, MSTP128, MSTP127, MSTP126, MSTP125, MSTP118, MSTP116, MSTP100,
267 MSTP219, 267 MSTP219,
268 MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, 268 MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
269 MSTP331, MSTP329, MSTP325, MSTP323, MSTP312, 269 MSTP331, MSTP329, MSTP325, MSTP323, MSTP312,
@@ -275,6 +275,10 @@ enum { MSTP001,
275 275
276static struct clk mstp_clks[MSTP_NR] = { 276static struct clk mstp_clks[MSTP_NR] = {
277 [MSTP001] = MSTP(&div4_clks[DIV4_HP], SMSTPCR0, 1, 0), /* IIC2 */ 277 [MSTP001] = MSTP(&div4_clks[DIV4_HP], SMSTPCR0, 1, 0), /* IIC2 */
278 [MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* CEU1 */
279 [MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* CSI2-RX1 */
280 [MSTP127] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 27, 0), /* CEU0 */
281 [MSTP126] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 26, 0), /* CSI2-RX0 */
278 [MSTP125] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */ 282 [MSTP125] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */
279 [MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX0 */ 283 [MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX0 */
280 [MSTP116] = MSTP(&div4_clks[DIV4_HP], SMSTPCR1, 16, 0), /* IIC0 */ 284 [MSTP116] = MSTP(&div4_clks[DIV4_HP], SMSTPCR1, 16, 0), /* IIC0 */
@@ -306,6 +310,9 @@ static struct clk_lookup lookups[] = {
306 CLKDEV_CON_ID("r_clk", &r_clk), 310 CLKDEV_CON_ID("r_clk", &r_clk),
307 311
308 /* DIV6 clocks */ 312 /* DIV6 clocks */
313 CLKDEV_CON_ID("vck1_clk", &div6_clks[DIV6_VCK1]),
314 CLKDEV_CON_ID("vck2_clk", &div6_clks[DIV6_VCK2]),
315 CLKDEV_CON_ID("vck3_clk", &div6_clks[DIV6_VCK3]),
309 CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSIT]), 316 CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSIT]),
310 CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSIT]), 317 CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSIT]),
311 CLKDEV_ICK_ID("dsi0p_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSI0P]), 318 CLKDEV_ICK_ID("dsi0p_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSI0P]),
@@ -313,11 +320,15 @@ static struct clk_lookup lookups[] = {
313 320
314 /* MSTP32 clocks */ 321 /* MSTP32 clocks */
315 CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[MSTP001]), /* I2C2 */ 322 CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[MSTP001]), /* I2C2 */
316 CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */ 323 CLKDEV_DEV_ID("sh_mobile_ceu.1", &mstp_clks[MSTP129]), /* CEU1 */
324 CLKDEV_DEV_ID("sh-mobile-csi2.1", &mstp_clks[MSTP128]), /* CSI2-RX1 */
325 CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[MSTP127]), /* CEU0 */
326 CLKDEV_DEV_ID("sh-mobile-csi2.0", &mstp_clks[MSTP126]), /* CSI2-RX0 */
317 CLKDEV_DEV_ID("sh_tmu.0", &mstp_clks[MSTP125]), /* TMU00 */ 327 CLKDEV_DEV_ID("sh_tmu.0", &mstp_clks[MSTP125]), /* TMU00 */
318 CLKDEV_DEV_ID("sh_tmu.1", &mstp_clks[MSTP125]), /* TMU01 */ 328 CLKDEV_DEV_ID("sh_tmu.1", &mstp_clks[MSTP125]), /* TMU01 */
319 CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* I2C0 */
320 CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */ 329 CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */
330 CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* I2C0 */
331 CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */
321 CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP219]), /* SCIFA7 */ 332 CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP219]), /* SCIFA7 */
322 CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */ 333 CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */
323 CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */ 334 CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */
diff --git a/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt b/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt
index efd3687ba190..3029aba38688 100644
--- a/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt
+++ b/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt
@@ -6,13 +6,10 @@ LIST "RWT Setting"
6EW 0xE6020004, 0xA500 6EW 0xE6020004, 0xA500
7EW 0xE6030004, 0xA500 7EW 0xE6030004, 0xA500
8 8
9DD 0x01001000, 0x01001000
10
11LIST "GPIO Setting" 9LIST "GPIO Setting"
12EB 0xE6051013, 0xA2 10EB 0xE6051013, 0xA2
13 11
14LIST "CPG" 12LIST "CPG"
15ED 0xE6150080, 0x00000180
16ED 0xE61500C0, 0x00000002 13ED 0xE61500C0, 0x00000002
17 14
18WAIT 1, 0xFE40009C 15WAIT 1, 0xFE40009C
@@ -37,6 +34,9 @@ ED 0xE615002C, 0x93000040
37 34
38WAIT 1, 0xFE40009C 35WAIT 1, 0xFE40009C
39 36
37LIST "SUB/USBClk"
38ED 0xE6150080, 0x00000180
39
40LIST "BSC" 40LIST "BSC"
41ED 0xFEC10000, 0x00E0001B 41ED 0xFEC10000, 0x00E0001B
42 42
@@ -53,7 +53,7 @@ ED 0xFE400048, 0x20C18505
53ED 0xFE40004C, 0x00110209 53ED 0xFE40004C, 0x00110209
54ED 0xFE400010, 0x00000087 54ED 0xFE400010, 0x00000087
55 55
56WAIT 10, 0xFE40009C 56WAIT 30, 0xFE40009C
57 57
58ED 0xFE400084, 0x0000003F 58ED 0xFE400084, 0x0000003F
59EB 0xFE500000, 0x00 59EB 0xFE500000, 0x00
@@ -84,7 +84,7 @@ ED 0xE6150004, 0x80331050
84 84
85WAIT 1, 0xFE40009C 85WAIT 1, 0xFE40009C
86 86
87ED 0xE6150354, 0x00000002 87ED 0xFE400354, 0x01AD8002
88 88
89LIST "SCIF0 - Serial port for earlyprintk" 89LIST "SCIF0 - Serial port for earlyprintk"
90EB 0xE6053098, 0x11 90EB 0xE6053098, 0x11
diff --git a/arch/arm/mach-shmobile/include/mach/head-mackerel.txt b/arch/arm/mach-shmobile/include/mach/head-mackerel.txt
index efd3687ba190..3029aba38688 100644
--- a/arch/arm/mach-shmobile/include/mach/head-mackerel.txt
+++ b/arch/arm/mach-shmobile/include/mach/head-mackerel.txt
@@ -6,13 +6,10 @@ LIST "RWT Setting"
6EW 0xE6020004, 0xA500 6EW 0xE6020004, 0xA500
7EW 0xE6030004, 0xA500 7EW 0xE6030004, 0xA500
8 8
9DD 0x01001000, 0x01001000
10
11LIST "GPIO Setting" 9LIST "GPIO Setting"
12EB 0xE6051013, 0xA2 10EB 0xE6051013, 0xA2
13 11
14LIST "CPG" 12LIST "CPG"
15ED 0xE6150080, 0x00000180
16ED 0xE61500C0, 0x00000002 13ED 0xE61500C0, 0x00000002
17 14
18WAIT 1, 0xFE40009C 15WAIT 1, 0xFE40009C
@@ -37,6 +34,9 @@ ED 0xE615002C, 0x93000040
37 34
38WAIT 1, 0xFE40009C 35WAIT 1, 0xFE40009C
39 36
37LIST "SUB/USBClk"
38ED 0xE6150080, 0x00000180
39
40LIST "BSC" 40LIST "BSC"
41ED 0xFEC10000, 0x00E0001B 41ED 0xFEC10000, 0x00E0001B
42 42
@@ -53,7 +53,7 @@ ED 0xFE400048, 0x20C18505
53ED 0xFE40004C, 0x00110209 53ED 0xFE40004C, 0x00110209
54ED 0xFE400010, 0x00000087 54ED 0xFE400010, 0x00000087
55 55
56WAIT 10, 0xFE40009C 56WAIT 30, 0xFE40009C
57 57
58ED 0xFE400084, 0x0000003F 58ED 0xFE400084, 0x0000003F
59EB 0xFE500000, 0x00 59EB 0xFE500000, 0x00
@@ -84,7 +84,7 @@ ED 0xE6150004, 0x80331050
84 84
85WAIT 1, 0xFE40009C 85WAIT 1, 0xFE40009C
86 86
87ED 0xE6150354, 0x00000002 87ED 0xFE400354, 0x01AD8002
88 88
89LIST "SCIF0 - Serial port for earlyprintk" 89LIST "SCIF0 - Serial port for earlyprintk"
90EB 0xE6053098, 0x11 90EB 0xE6053098, 0x11
diff --git a/arch/arm/plat-samsung/dev-uart.c b/arch/arm/plat-samsung/dev-uart.c
index 3776cd952450..5928105490fa 100644
--- a/arch/arm/plat-samsung/dev-uart.c
+++ b/arch/arm/plat-samsung/dev-uart.c
@@ -15,6 +15,8 @@
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/platform_device.h> 16#include <linux/platform_device.h>
17 17
18#include <plat/devs.h>
19
18/* uart devices */ 20/* uart devices */
19 21
20static struct platform_device s3c24xx_uart_device0 = { 22static struct platform_device s3c24xx_uart_device0 = {
diff --git a/arch/blackfin/lib/outs.S b/arch/blackfin/lib/outs.S
index 250f4d4b9436..06a5e674401f 100644
--- a/arch/blackfin/lib/outs.S
+++ b/arch/blackfin/lib/outs.S
@@ -13,6 +13,8 @@
13.align 2 13.align 2
14 14
15ENTRY(_outsl) 15ENTRY(_outsl)
16 CC = R2 == 0;
17 IF CC JUMP 1f;
16 P0 = R0; /* P0 = port */ 18 P0 = R0; /* P0 = port */
17 P1 = R1; /* P1 = address */ 19 P1 = R1; /* P1 = address */
18 P2 = R2; /* P2 = count */ 20 P2 = R2; /* P2 = count */
@@ -20,10 +22,12 @@ ENTRY(_outsl)
20 LSETUP( .Llong_loop_s, .Llong_loop_e) LC0 = P2; 22 LSETUP( .Llong_loop_s, .Llong_loop_e) LC0 = P2;
21.Llong_loop_s: R0 = [P1++]; 23.Llong_loop_s: R0 = [P1++];
22.Llong_loop_e: [P0] = R0; 24.Llong_loop_e: [P0] = R0;
23 RTS; 251: RTS;
24ENDPROC(_outsl) 26ENDPROC(_outsl)
25 27
26ENTRY(_outsw) 28ENTRY(_outsw)
29 CC = R2 == 0;
30 IF CC JUMP 1f;
27 P0 = R0; /* P0 = port */ 31 P0 = R0; /* P0 = port */
28 P1 = R1; /* P1 = address */ 32 P1 = R1; /* P1 = address */
29 P2 = R2; /* P2 = count */ 33 P2 = R2; /* P2 = count */
@@ -31,10 +35,12 @@ ENTRY(_outsw)
31 LSETUP( .Lword_loop_s, .Lword_loop_e) LC0 = P2; 35 LSETUP( .Lword_loop_s, .Lword_loop_e) LC0 = P2;
32.Lword_loop_s: R0 = W[P1++]; 36.Lword_loop_s: R0 = W[P1++];
33.Lword_loop_e: W[P0] = R0; 37.Lword_loop_e: W[P0] = R0;
34 RTS; 381: RTS;
35ENDPROC(_outsw) 39ENDPROC(_outsw)
36 40
37ENTRY(_outsb) 41ENTRY(_outsb)
42 CC = R2 == 0;
43 IF CC JUMP 1f;
38 P0 = R0; /* P0 = port */ 44 P0 = R0; /* P0 = port */
39 P1 = R1; /* P1 = address */ 45 P1 = R1; /* P1 = address */
40 P2 = R2; /* P2 = count */ 46 P2 = R2; /* P2 = count */
@@ -42,10 +48,12 @@ ENTRY(_outsb)
42 LSETUP( .Lbyte_loop_s, .Lbyte_loop_e) LC0 = P2; 48 LSETUP( .Lbyte_loop_s, .Lbyte_loop_e) LC0 = P2;
43.Lbyte_loop_s: R0 = B[P1++]; 49.Lbyte_loop_s: R0 = B[P1++];
44.Lbyte_loop_e: B[P0] = R0; 50.Lbyte_loop_e: B[P0] = R0;
45 RTS; 511: RTS;
46ENDPROC(_outsb) 52ENDPROC(_outsb)
47 53
48ENTRY(_outsw_8) 54ENTRY(_outsw_8)
55 CC = R2 == 0;
56 IF CC JUMP 1f;
49 P0 = R0; /* P0 = port */ 57 P0 = R0; /* P0 = port */
50 P1 = R1; /* P1 = address */ 58 P1 = R1; /* P1 = address */
51 P2 = R2; /* P2 = count */ 59 P2 = R2; /* P2 = count */
@@ -56,5 +64,5 @@ ENTRY(_outsw_8)
56 R0 = R0 << 8; 64 R0 = R0 << 8;
57 R0 = R0 + R1; 65 R0 = R0 + R1;
58.Lword8_loop_e: W[P0] = R0; 66.Lword8_loop_e: W[P0] = R0;
59 RTS; 671: RTS;
60ENDPROC(_outsw_8) 68ENDPROC(_outsw_8)
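
Each out-string helper above gains a branch to the final RTS when the count is zero, because the Blackfin hardware loop set up by LSETUP always executes at least once. A C analogue of that guard, with a hypothetical example_outsl(); only the early-return shape mirrors the patch:

#include <stddef.h>
#include <stdint.h>

static void example_outsl(volatile uint32_t *port, const uint32_t *buf,
			  size_t count)
{
	/*
	 * The hardware loop, like this do/while, always runs at least once,
	 * so a zero count has to bail out up front.
	 */
	if (count == 0)
		return;

	do {
		*port = *buf++;
	} while (--count);
}
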
diff --git a/arch/blackfin/mach-common/cache.S b/arch/blackfin/mach-common/cache.S
index 790c767ca95a..ab4a925a443e 100644
--- a/arch/blackfin/mach-common/cache.S
+++ b/arch/blackfin/mach-common/cache.S
@@ -58,6 +58,8 @@
581: 581:
59.ifeqs "\flushins", BROK_FLUSH_INST 59.ifeqs "\flushins", BROK_FLUSH_INST
60 \flushins [P0++]; 60 \flushins [P0++];
61 nop;
62 nop;
612: nop; 632: nop;
62.else 64.else
632: \flushins [P0++]; 652: \flushins [P0++];
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index f5ecc0566bc2..d88983516e26 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -4,6 +4,7 @@ config MIPS
4 select HAVE_GENERIC_DMA_COHERENT 4 select HAVE_GENERIC_DMA_COHERENT
5 select HAVE_IDE 5 select HAVE_IDE
6 select HAVE_OPROFILE 6 select HAVE_OPROFILE
7 select HAVE_IRQ_WORK
7 select HAVE_PERF_EVENTS 8 select HAVE_PERF_EVENTS
8 select PERF_USE_VMALLOC 9 select PERF_USE_VMALLOC
9 select HAVE_ARCH_KGDB 10 select HAVE_ARCH_KGDB
@@ -208,6 +209,7 @@ config MACH_JZ4740
208 select ARCH_REQUIRE_GPIOLIB 209 select ARCH_REQUIRE_GPIOLIB
209 select SYS_HAS_EARLY_PRINTK 210 select SYS_HAS_EARLY_PRINTK
210 select HAVE_PWM 211 select HAVE_PWM
212 select HAVE_CLK
211 213
212config LASAT 214config LASAT
213 bool "LASAT Networks platforms" 215 bool "LASAT Networks platforms"
@@ -333,6 +335,8 @@ config PNX8550_STB810
333config PMC_MSP 335config PMC_MSP
334 bool "PMC-Sierra MSP chipsets" 336 bool "PMC-Sierra MSP chipsets"
335 depends on EXPERIMENTAL 337 depends on EXPERIMENTAL
338 select CEVT_R4K
339 select CSRC_R4K
336 select DMA_NONCOHERENT 340 select DMA_NONCOHERENT
337 select SWAP_IO_SPACE 341 select SWAP_IO_SPACE
338 select NO_EXCEPT_FILL 342 select NO_EXCEPT_FILL
diff --git a/arch/mips/alchemy/mtx-1/board_setup.c b/arch/mips/alchemy/mtx-1/board_setup.c
index 6398fa95905c..40b84b991191 100644
--- a/arch/mips/alchemy/mtx-1/board_setup.c
+++ b/arch/mips/alchemy/mtx-1/board_setup.c
@@ -54,8 +54,8 @@ int mtx1_pci_idsel(unsigned int devsel, int assert);
54 54
55static void mtx1_reset(char *c) 55static void mtx1_reset(char *c)
56{ 56{
57 /* Hit BCSR.SYSTEM_CONTROL[SW_RST] */ 57 /* Jump to the reset vector */
58 au_writel(0x00000000, 0xAE00001C); 58 __asm__ __volatile__("jr\t%0"::"r"(0xbfc00000));
59} 59}
60 60
61static void mtx1_power_off(void) 61static void mtx1_power_off(void)
diff --git a/arch/mips/alchemy/mtx-1/platform.c b/arch/mips/alchemy/mtx-1/platform.c
index e30e42add697..956f946218c5 100644
--- a/arch/mips/alchemy/mtx-1/platform.c
+++ b/arch/mips/alchemy/mtx-1/platform.c
@@ -28,6 +28,8 @@
28#include <linux/mtd/physmap.h> 28#include <linux/mtd/physmap.h>
29#include <mtd/mtd-abi.h> 29#include <mtd/mtd-abi.h>
30 30
31#include <asm/mach-au1x00/au1xxx_eth.h>
32
31static struct gpio_keys_button mtx1_gpio_button[] = { 33static struct gpio_keys_button mtx1_gpio_button[] = {
32 { 34 {
33 .gpio = 207, 35 .gpio = 207,
@@ -140,10 +142,17 @@ static struct __initdata platform_device * mtx1_devs[] = {
140 &mtx1_mtd, 142 &mtx1_mtd,
141}; 143};
142 144
145static struct au1000_eth_platform_data mtx1_au1000_eth0_pdata = {
146 .phy_search_highest_addr = 1,
147 .phy1_search_mac0 = 1,
148};
149
143static int __init mtx1_register_devices(void) 150static int __init mtx1_register_devices(void)
144{ 151{
145 int rc; 152 int rc;
146 153
154 au1xxx_override_eth_cfg(0, &mtx1_au1000_eth0_pdata);
155
147 rc = gpio_request(mtx1_gpio_button[0].gpio, 156 rc = gpio_request(mtx1_gpio_button[0].gpio,
148 mtx1_gpio_button[0].desc); 157 mtx1_gpio_button[0].desc);
149 if (rc < 0) { 158 if (rc < 0) {
diff --git a/arch/mips/alchemy/xxs1500/board_setup.c b/arch/mips/alchemy/xxs1500/board_setup.c
index b43c918925d3..80c521e5290d 100644
--- a/arch/mips/alchemy/xxs1500/board_setup.c
+++ b/arch/mips/alchemy/xxs1500/board_setup.c
@@ -36,8 +36,8 @@
36 36
37static void xxs1500_reset(char *c) 37static void xxs1500_reset(char *c)
38{ 38{
39 /* Hit BCSR.SYSTEM_CONTROL[SW_RST] */ 39 /* Jump to the reset vector */
40 au_writel(0x00000000, 0xAE00001C); 40 __asm__ __volatile__("jr\t%0"::"r"(0xbfc00000));
41} 41}
42 42
43static void xxs1500_power_off(void) 43static void xxs1500_power_off(void)
diff --git a/arch/mips/include/asm/perf_event.h b/arch/mips/include/asm/perf_event.h
index e00007cf8162..d0c77496c728 100644
--- a/arch/mips/include/asm/perf_event.h
+++ b/arch/mips/include/asm/perf_event.h
@@ -11,15 +11,5 @@
11 11
12#ifndef __MIPS_PERF_EVENT_H__ 12#ifndef __MIPS_PERF_EVENT_H__
13#define __MIPS_PERF_EVENT_H__ 13#define __MIPS_PERF_EVENT_H__
14 14/* Leave it empty here. The file is required by linux/perf_event.h */
15/*
16 * MIPS performance counters do not raise NMI upon overflow, a regular
17 * interrupt will be signaled. Hence we can do the pending perf event
18 * work at the tail of the irq handler.
19 */
20static inline void
21set_perf_event_pending(void)
22{
23}
24
25#endif /* __MIPS_PERF_EVENT_H__ */ 15#endif /* __MIPS_PERF_EVENT_H__ */
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 5a84a1f11231..94ca2b018af7 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -17,29 +17,13 @@
17#include <asm/cacheflush.h> 17#include <asm/cacheflush.h>
18#include <asm/uasm.h> 18#include <asm/uasm.h>
19 19
20/* 20#include <asm-generic/sections.h>
21 * If the Instruction Pointer is in module space (0xc0000000), return true;
22 * otherwise, it is in kernel space (0x80000000), return false.
23 *
24 * FIXME: This will not work when the kernel space and module space are the
25 * same. If they are the same, we need to modify scripts/recordmcount.pl,
26 * ftrace_make_nop/call() and the other related parts to ensure the
27 * enabling/disabling of the calling site to _mcount is right for both kernel
28 * and module.
29 */
30
31static inline int in_module(unsigned long ip)
32{
33 return ip & 0x40000000;
34}
35 21
36#ifdef CONFIG_DYNAMIC_FTRACE 22#ifdef CONFIG_DYNAMIC_FTRACE
37 23
38#define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */ 24#define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */
39#define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */ 25#define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */
40 26
41#define INSN_B_1F_4 0x10000004 /* b 1f; offset = 4 */
42#define INSN_B_1F_5 0x10000005 /* b 1f; offset = 5 */
43#define INSN_NOP 0x00000000 /* nop */ 27#define INSN_NOP 0x00000000 /* nop */
44#define INSN_JAL(addr) \ 28#define INSN_JAL(addr) \
45 ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK))) 29 ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
@@ -69,6 +53,20 @@ static inline void ftrace_dyn_arch_init_insns(void)
69#endif 53#endif
70} 54}
71 55
56/*
57 * Check if the address is in kernel space
58 *
 59 * Cloned from core_kernel_text() in kernel/extable.c, but does not call
 60 * init_kernel_text(): ftrace does not trace functions in init sections.
61 */
62static inline int in_kernel_space(unsigned long ip)
63{
64 if (ip >= (unsigned long)_stext &&
65 ip <= (unsigned long)_etext)
66 return 1;
67 return 0;
68}
69
72static int ftrace_modify_code(unsigned long ip, unsigned int new_code) 70static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
73{ 71{
74 int faulted; 72 int faulted;
@@ -84,6 +82,42 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
84 return 0; 82 return 0;
85} 83}
86 84
85/*
86 * The details about the calling site of mcount on MIPS
87 *
88 * 1. For kernel:
89 *
90 * move at, ra
91 * jal _mcount --> nop
92 *
93 * 2. For modules:
94 *
95 * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
96 *
97 * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005)
98 * addiu v1, v1, low_16bit_of_mcount
99 * move at, ra
100 * move $12, ra_address
101 * jalr v1
102 * sub sp, sp, 8
103 * 1: offset = 5 instructions
 104 * 2.2 For the other situations
105 *
106 * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004)
107 * addiu v1, v1, low_16bit_of_mcount
108 * move at, ra
109 * jalr v1
110 * nop | move $12, ra_address | sub sp, sp, 8
111 * 1: offset = 4 instructions
112 */
113
114#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
115#define MCOUNT_OFFSET_INSNS 5
116#else
117#define MCOUNT_OFFSET_INSNS 4
118#endif
119#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
120
87int ftrace_make_nop(struct module *mod, 121int ftrace_make_nop(struct module *mod,
88 struct dyn_ftrace *rec, unsigned long addr) 122 struct dyn_ftrace *rec, unsigned long addr)
89{ 123{
@@ -91,39 +125,11 @@ int ftrace_make_nop(struct module *mod,
91 unsigned long ip = rec->ip; 125 unsigned long ip = rec->ip;
92 126
93 /* 127 /*
94 * We have compiled module with -mlong-calls, but compiled the kernel 128 * If ip is in kernel space, no long call, otherwise, long call is
95 * without it, we need to cope with them respectively. 129 * needed.
96 */ 130 */
97 if (in_module(ip)) { 131 new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
98#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT) 132
99 /*
100 * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005)
101 * addiu v1, v1, low_16bit_of_mcount
102 * move at, ra
103 * move $12, ra_address
104 * jalr v1
105 * sub sp, sp, 8
106 * 1: offset = 5 instructions
107 */
108 new = INSN_B_1F_5;
109#else
110 /*
111 * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004)
112 * addiu v1, v1, low_16bit_of_mcount
113 * move at, ra
114 * jalr v1
115 * nop | move $12, ra_address | sub sp, sp, 8
116 * 1: offset = 4 instructions
117 */
118 new = INSN_B_1F_4;
119#endif
120 } else {
121 /*
122 * move at, ra
123 * jal _mcount --> nop
124 */
125 new = INSN_NOP;
126 }
127 return ftrace_modify_code(ip, new); 133 return ftrace_modify_code(ip, new);
128} 134}
129 135
@@ -132,8 +138,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
132 unsigned int new; 138 unsigned int new;
133 unsigned long ip = rec->ip; 139 unsigned long ip = rec->ip;
134 140
135 /* ip, module: 0xc0000000, kernel: 0x80000000 */ 141 new = in_kernel_space(ip) ? insn_jal_ftrace_caller :
136 new = in_module(ip) ? insn_lui_v1_hi16_mcount : insn_jal_ftrace_caller; 142 insn_lui_v1_hi16_mcount;
137 143
138 return ftrace_modify_code(ip, new); 144 return ftrace_modify_code(ip, new);
139} 145}
@@ -190,29 +196,25 @@ int ftrace_disable_ftrace_graph_caller(void)
190#define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */ 196#define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */
191#define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */ 197#define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */
192 198
193unsigned long ftrace_get_parent_addr(unsigned long self_addr, 199unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
194 unsigned long parent, 200 old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
195 unsigned long parent_addr,
196 unsigned long fp)
197{ 201{
198 unsigned long sp, ip, ra; 202 unsigned long sp, ip, tmp;
199 unsigned int code; 203 unsigned int code;
200 int faulted; 204 int faulted;
201 205
202 /* 206 /*
203 * For module, move the ip from calling site of mcount to the 207 * For module, move the ip from the return address after the
204 * instruction "lui v1, hi_16bit_of_mcount"(offset is 20), but for 208 * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for
205 * kernel, move to the instruction "move ra, at"(offset is 12) 209 * kernel, move after the instruction "move ra, at"(offset is 16)
206 */ 210 */
207 ip = self_addr - (in_module(self_addr) ? 20 : 12); 211 ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);
208 212
209 /* 213 /*
210 * search the text until finding the non-store instruction or "s{d,w} 214 * search the text until finding the non-store instruction or "s{d,w}
211 * ra, offset(sp)" instruction 215 * ra, offset(sp)" instruction
212 */ 216 */
213 do { 217 do {
214 ip -= 4;
215
216 /* get the code at "ip": code = *(unsigned int *)ip; */ 218 /* get the code at "ip": code = *(unsigned int *)ip; */
217 safe_load_code(code, ip, faulted); 219 safe_load_code(code, ip, faulted);
218 220
@@ -224,18 +226,20 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
224 * store the ra on the stack 226 * store the ra on the stack
225 */ 227 */
226 if ((code & S_R_SP) != S_R_SP) 228 if ((code & S_R_SP) != S_R_SP)
227 return parent_addr; 229 return parent_ra_addr;
228 230
229 } while (((code & S_RA_SP) != S_RA_SP)); 231 /* Move to the next instruction */
232 ip -= 4;
233 } while ((code & S_RA_SP) != S_RA_SP);
230 234
231 sp = fp + (code & OFFSET_MASK); 235 sp = fp + (code & OFFSET_MASK);
232 236
233 /* ra = *(unsigned long *)sp; */ 237 /* tmp = *(unsigned long *)sp; */
234 safe_load_stack(ra, sp, faulted); 238 safe_load_stack(tmp, sp, faulted);
235 if (unlikely(faulted)) 239 if (unlikely(faulted))
236 return 0; 240 return 0;
237 241
238 if (ra == parent) 242 if (tmp == old_parent_ra)
239 return sp; 243 return sp;
240 return 0; 244 return 0;
241} 245}
@@ -246,21 +250,21 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
246 * Hook the return address and push it in the stack of return addrs 250 * Hook the return address and push it in the stack of return addrs
247 * in current thread info. 251 * in current thread info.
248 */ 252 */
249void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, 253void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
250 unsigned long fp) 254 unsigned long fp)
251{ 255{
252 unsigned long old; 256 unsigned long old_parent_ra;
253 struct ftrace_graph_ent trace; 257 struct ftrace_graph_ent trace;
254 unsigned long return_hooker = (unsigned long) 258 unsigned long return_hooker = (unsigned long)
255 &return_to_handler; 259 &return_to_handler;
256 int faulted; 260 int faulted, insns;
257 261
258 if (unlikely(atomic_read(&current->tracing_graph_pause))) 262 if (unlikely(atomic_read(&current->tracing_graph_pause)))
259 return; 263 return;
260 264
261 /* 265 /*
262 * "parent" is the stack address saved the return address of the caller 266 * "parent_ra_addr" is the stack address saved the return address of
263 * of _mcount. 267 * the caller of _mcount.
264 * 268 *
265 * if the gcc < 4.5, a leaf function does not save the return address 269 * if the gcc < 4.5, a leaf function does not save the return address
266 * in the stack address, so, we "emulate" one in _mcount's stack space, 270 * in the stack address, so, we "emulate" one in _mcount's stack space,
@@ -275,37 +279,44 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
275 * do it in ftrace_graph_caller of mcount.S. 279 * do it in ftrace_graph_caller of mcount.S.
276 */ 280 */
277 281
278 /* old = *parent; */ 282 /* old_parent_ra = *parent_ra_addr; */
279 safe_load_stack(old, parent, faulted); 283 safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
280 if (unlikely(faulted)) 284 if (unlikely(faulted))
281 goto out; 285 goto out;
282#ifndef KBUILD_MCOUNT_RA_ADDRESS 286#ifndef KBUILD_MCOUNT_RA_ADDRESS
283 parent = (unsigned long *)ftrace_get_parent_addr(self_addr, old, 287 parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
284 (unsigned long)parent, fp); 288 old_parent_ra, (unsigned long)parent_ra_addr, fp);
285 /* 289 /*
286 * If fails when getting the stack address of the non-leaf function's 290 * If fails when getting the stack address of the non-leaf function's
287 * ra, stop function graph tracer and return 291 * ra, stop function graph tracer and return
288 */ 292 */
289 if (parent == 0) 293 if (parent_ra_addr == 0)
290 goto out; 294 goto out;
291#endif 295#endif
292 /* *parent = return_hooker; */ 296 /* *parent_ra_addr = return_hooker; */
293 safe_store_stack(return_hooker, parent, faulted); 297 safe_store_stack(return_hooker, parent_ra_addr, faulted);
294 if (unlikely(faulted)) 298 if (unlikely(faulted))
295 goto out; 299 goto out;
296 300
297 if (ftrace_push_return_trace(old, self_addr, &trace.depth, fp) == 301 if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp)
298 -EBUSY) { 302 == -EBUSY) {
299 *parent = old; 303 *parent_ra_addr = old_parent_ra;
300 return; 304 return;
301 } 305 }
302 306
303 trace.func = self_addr; 307 /*
308 * Get the recorded ip of the current mcount calling site in the
309 * __mcount_loc section, which will be used to filter the function
310 * entries configured through the tracing/set_graph_function interface.
311 */
312
313 insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
314 trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);
304 315
305 /* Only trace if the calling function expects to */ 316 /* Only trace if the calling function expects to */
306 if (!ftrace_graph_entry(&trace)) { 317 if (!ftrace_graph_entry(&trace)) {
307 current->curr_ret_stack--; 318 current->curr_ret_stack--;
308 *parent = old; 319 *parent_ra_addr = old_parent_ra;
309 } 320 }
310 return; 321 return;
311out: 322out:
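
A note for readers unfamiliar with the MIPS branch encoding used in the ftrace.c hunks above: the patch folds the two hard-coded constants INSN_B_1F_4 (0x10000004) and INSN_B_1F_5 (0x10000005) into a single INSN_B_1F built from MCOUNT_OFFSET_INSNS. The standalone sketch below is illustrative only and not part of the patch; it simply checks that the "b 1f" opcode (0x10000000) OR'ed with the instruction offset reproduces both old values.

/* Illustrative, standalone check of the branch encoding; not part of the patch. */
#include <assert.h>
#include <stdio.h>

/* MIPS "b 1f": opcode 0b000100 in bits 31:26, offset counted in instructions */
#define INSN_B(offset_insns)	(0x10000000u | (unsigned int)(offset_insns))

int main(void)
{
	assert(INSN_B(4) == 0x10000004u);	/* old INSN_B_1F_4 */
	assert(INSN_B(5) == 0x10000005u);	/* old INSN_B_1F_5 */
	printf("b 1f (4 insns): 0x%08x, b 1f (5 insns): 0x%08x\n",
	       INSN_B(4), INSN_B(5));
	return 0;
}
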
diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c
index 2b7f3f703b83..a8244854d3dc 100644
--- a/arch/mips/kernel/perf_event.c
+++ b/arch/mips/kernel/perf_event.c
@@ -161,41 +161,6 @@ mipspmu_event_set_period(struct perf_event *event,
161 return ret; 161 return ret;
162} 162}
163 163
164static int mipspmu_enable(struct perf_event *event)
165{
166 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
167 struct hw_perf_event *hwc = &event->hw;
168 int idx;
169 int err = 0;
170
171 /* To look for a free counter for this event. */
172 idx = mipspmu->alloc_counter(cpuc, hwc);
173 if (idx < 0) {
174 err = idx;
175 goto out;
176 }
177
178 /*
179 * If there is an event in the counter we are going to use then
180 * make sure it is disabled.
181 */
182 event->hw.idx = idx;
183 mipspmu->disable_event(idx);
184 cpuc->events[idx] = event;
185
186 /* Set the period for the event. */
187 mipspmu_event_set_period(event, hwc, idx);
188
189 /* Enable the event. */
190 mipspmu->enable_event(hwc, idx);
191
192 /* Propagate our changes to the userspace mapping. */
193 perf_event_update_userpage(event);
194
195out:
196 return err;
197}
198
199static void mipspmu_event_update(struct perf_event *event, 164static void mipspmu_event_update(struct perf_event *event,
200 struct hw_perf_event *hwc, 165 struct hw_perf_event *hwc,
201 int idx) 166 int idx)
@@ -204,7 +169,7 @@ static void mipspmu_event_update(struct perf_event *event,
204 unsigned long flags; 169 unsigned long flags;
205 int shift = 64 - TOTAL_BITS; 170 int shift = 64 - TOTAL_BITS;
206 s64 prev_raw_count, new_raw_count; 171 s64 prev_raw_count, new_raw_count;
207 s64 delta; 172 u64 delta;
208 173
209again: 174again:
210 prev_raw_count = local64_read(&hwc->prev_count); 175 prev_raw_count = local64_read(&hwc->prev_count);
@@ -231,32 +196,90 @@ again:
231 return; 196 return;
232} 197}
233 198
234static void mipspmu_disable(struct perf_event *event) 199static void mipspmu_start(struct perf_event *event, int flags)
200{
201 struct hw_perf_event *hwc = &event->hw;
202
203 if (!mipspmu)
204 return;
205
206 if (flags & PERF_EF_RELOAD)
207 WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
208
209 hwc->state = 0;
210
211 /* Set the period for the event. */
212 mipspmu_event_set_period(event, hwc, hwc->idx);
213
214 /* Enable the event. */
215 mipspmu->enable_event(hwc, hwc->idx);
216}
217
218static void mipspmu_stop(struct perf_event *event, int flags)
219{
220 struct hw_perf_event *hwc = &event->hw;
221
222 if (!mipspmu)
223 return;
224
225 if (!(hwc->state & PERF_HES_STOPPED)) {
226 /* We are working on a local event. */
227 mipspmu->disable_event(hwc->idx);
228 barrier();
229 mipspmu_event_update(event, hwc, hwc->idx);
230 hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
231 }
232}
233
234static int mipspmu_add(struct perf_event *event, int flags)
235{ 235{
236 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 236 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
237 struct hw_perf_event *hwc = &event->hw; 237 struct hw_perf_event *hwc = &event->hw;
238 int idx = hwc->idx; 238 int idx;
239 int err = 0;
239 240
241 perf_pmu_disable(event->pmu);
240 242
241 WARN_ON(idx < 0 || idx >= mipspmu->num_counters); 243 /* To look for a free counter for this event. */
244 idx = mipspmu->alloc_counter(cpuc, hwc);
245 if (idx < 0) {
246 err = idx;
247 goto out;
248 }
242 249
243 /* We are working on a local event. */ 250 /*
251 * If there is an event in the counter we are going to use then
252 * make sure it is disabled.
253 */
254 event->hw.idx = idx;
244 mipspmu->disable_event(idx); 255 mipspmu->disable_event(idx);
256 cpuc->events[idx] = event;
245 257
246 barrier(); 258 hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
247 259 if (flags & PERF_EF_START)
248 mipspmu_event_update(event, hwc, idx); 260 mipspmu_start(event, PERF_EF_RELOAD);
249 cpuc->events[idx] = NULL;
250 clear_bit(idx, cpuc->used_mask);
251 261
262 /* Propagate our changes to the userspace mapping. */
252 perf_event_update_userpage(event); 263 perf_event_update_userpage(event);
264
265out:
266 perf_pmu_enable(event->pmu);
267 return err;
253} 268}
254 269
255static void mipspmu_unthrottle(struct perf_event *event) 270static void mipspmu_del(struct perf_event *event, int flags)
256{ 271{
272 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
257 struct hw_perf_event *hwc = &event->hw; 273 struct hw_perf_event *hwc = &event->hw;
274 int idx = hwc->idx;
258 275
259 mipspmu->enable_event(hwc, hwc->idx); 276 WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
277
278 mipspmu_stop(event, PERF_EF_UPDATE);
279 cpuc->events[idx] = NULL;
280 clear_bit(idx, cpuc->used_mask);
281
282 perf_event_update_userpage(event);
260} 283}
261 284
262static void mipspmu_read(struct perf_event *event) 285static void mipspmu_read(struct perf_event *event)
@@ -270,12 +293,17 @@ static void mipspmu_read(struct perf_event *event)
270 mipspmu_event_update(event, hwc, hwc->idx); 293 mipspmu_event_update(event, hwc, hwc->idx);
271} 294}
272 295
273static struct pmu pmu = { 296static void mipspmu_enable(struct pmu *pmu)
274 .enable = mipspmu_enable, 297{
275 .disable = mipspmu_disable, 298 if (mipspmu)
276 .unthrottle = mipspmu_unthrottle, 299 mipspmu->start();
277 .read = mipspmu_read, 300}
278}; 301
302static void mipspmu_disable(struct pmu *pmu)
303{
304 if (mipspmu)
305 mipspmu->stop();
306}
279 307
280static atomic_t active_events = ATOMIC_INIT(0); 308static atomic_t active_events = ATOMIC_INIT(0);
281static DEFINE_MUTEX(pmu_reserve_mutex); 309static DEFINE_MUTEX(pmu_reserve_mutex);
@@ -318,6 +346,82 @@ static void mipspmu_free_irq(void)
318 perf_irq = save_perf_irq; 346 perf_irq = save_perf_irq;
319} 347}
320 348
349/*
 350 * mipsxx/rm9000/loongson2 have different performance counters; they have
351 * specific low-level init routines.
352 */
353static void reset_counters(void *arg);
354static int __hw_perf_event_init(struct perf_event *event);
355
356static void hw_perf_event_destroy(struct perf_event *event)
357{
358 if (atomic_dec_and_mutex_lock(&active_events,
359 &pmu_reserve_mutex)) {
360 /*
361 * We must not call the destroy function with interrupts
362 * disabled.
363 */
364 on_each_cpu(reset_counters,
365 (void *)(long)mipspmu->num_counters, 1);
366 mipspmu_free_irq();
367 mutex_unlock(&pmu_reserve_mutex);
368 }
369}
370
371static int mipspmu_event_init(struct perf_event *event)
372{
373 int err = 0;
374
375 switch (event->attr.type) {
376 case PERF_TYPE_RAW:
377 case PERF_TYPE_HARDWARE:
378 case PERF_TYPE_HW_CACHE:
379 break;
380
381 default:
382 return -ENOENT;
383 }
384
385 if (!mipspmu || event->cpu >= nr_cpumask_bits ||
386 (event->cpu >= 0 && !cpu_online(event->cpu)))
387 return -ENODEV;
388
389 if (!atomic_inc_not_zero(&active_events)) {
390 if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
391 atomic_dec(&active_events);
392 return -ENOSPC;
393 }
394
395 mutex_lock(&pmu_reserve_mutex);
396 if (atomic_read(&active_events) == 0)
397 err = mipspmu_get_irq();
398
399 if (!err)
400 atomic_inc(&active_events);
401 mutex_unlock(&pmu_reserve_mutex);
402 }
403
404 if (err)
405 return err;
406
407 err = __hw_perf_event_init(event);
408 if (err)
409 hw_perf_event_destroy(event);
410
411 return err;
412}
413
414static struct pmu pmu = {
415 .pmu_enable = mipspmu_enable,
416 .pmu_disable = mipspmu_disable,
417 .event_init = mipspmu_event_init,
418 .add = mipspmu_add,
419 .del = mipspmu_del,
420 .start = mipspmu_start,
421 .stop = mipspmu_stop,
422 .read = mipspmu_read,
423};
424
321static inline unsigned int 425static inline unsigned int
322mipspmu_perf_event_encode(const struct mips_perf_event *pev) 426mipspmu_perf_event_encode(const struct mips_perf_event *pev)
323{ 427{
@@ -382,8 +486,9 @@ static int validate_event(struct cpu_hw_events *cpuc,
382{ 486{
383 struct hw_perf_event fake_hwc = event->hw; 487 struct hw_perf_event fake_hwc = event->hw;
384 488
385 if (event->pmu && event->pmu != &pmu) 489 /* Allow mixed event group. So return 1 to pass validation. */
386 return 0; 490 if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
491 return 1;
387 492
388 return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0; 493 return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0;
389} 494}
@@ -409,73 +514,6 @@ static int validate_group(struct perf_event *event)
409 return 0; 514 return 0;
410} 515}
411 516
412/*
413 * mipsxx/rm9000/loongson2 have different performance counters, they have
414 * specific low-level init routines.
415 */
416static void reset_counters(void *arg);
417static int __hw_perf_event_init(struct perf_event *event);
418
419static void hw_perf_event_destroy(struct perf_event *event)
420{
421 if (atomic_dec_and_mutex_lock(&active_events,
422 &pmu_reserve_mutex)) {
423 /*
424 * We must not call the destroy function with interrupts
425 * disabled.
426 */
427 on_each_cpu(reset_counters,
428 (void *)(long)mipspmu->num_counters, 1);
429 mipspmu_free_irq();
430 mutex_unlock(&pmu_reserve_mutex);
431 }
432}
433
434const struct pmu *hw_perf_event_init(struct perf_event *event)
435{
436 int err = 0;
437
438 if (!mipspmu || event->cpu >= nr_cpumask_bits ||
439 (event->cpu >= 0 && !cpu_online(event->cpu)))
440 return ERR_PTR(-ENODEV);
441
442 if (!atomic_inc_not_zero(&active_events)) {
443 if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
444 atomic_dec(&active_events);
445 return ERR_PTR(-ENOSPC);
446 }
447
448 mutex_lock(&pmu_reserve_mutex);
449 if (atomic_read(&active_events) == 0)
450 err = mipspmu_get_irq();
451
452 if (!err)
453 atomic_inc(&active_events);
454 mutex_unlock(&pmu_reserve_mutex);
455 }
456
457 if (err)
458 return ERR_PTR(err);
459
460 err = __hw_perf_event_init(event);
461 if (err)
462 hw_perf_event_destroy(event);
463
464 return err ? ERR_PTR(err) : &pmu;
465}
466
467void hw_perf_enable(void)
468{
469 if (mipspmu)
470 mipspmu->start();
471}
472
473void hw_perf_disable(void)
474{
475 if (mipspmu)
476 mipspmu->stop();
477}
478
479/* This is needed by specific irq handlers in perf_event_*.c */ 517/* This is needed by specific irq handlers in perf_event_*.c */
480static void 518static void
481handle_associated_event(struct cpu_hw_events *cpuc, 519handle_associated_event(struct cpu_hw_events *cpuc,
@@ -496,21 +534,13 @@ handle_associated_event(struct cpu_hw_events *cpuc,
496#include "perf_event_mipsxx.c" 534#include "perf_event_mipsxx.c"
497 535
498/* Callchain handling code. */ 536/* Callchain handling code. */
499static inline void
500callchain_store(struct perf_callchain_entry *entry,
501 u64 ip)
502{
503 if (entry->nr < PERF_MAX_STACK_DEPTH)
504 entry->ip[entry->nr++] = ip;
505}
506 537
507/* 538/*
508 * Leave userspace callchain empty for now. When we find a way to trace 539 * Leave userspace callchain empty for now. When we find a way to trace
509 * the user stack callchains, we add here. 540 * the user stack callchains, we add here.
510 */ 541 */
511static void 542void perf_callchain_user(struct perf_callchain_entry *entry,
512perf_callchain_user(struct pt_regs *regs, 543 struct pt_regs *regs)
513 struct perf_callchain_entry *entry)
514{ 544{
515} 545}
516 546
@@ -523,23 +553,21 @@ static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
523 while (!kstack_end(sp)) { 553 while (!kstack_end(sp)) {
524 addr = *sp++; 554 addr = *sp++;
525 if (__kernel_text_address(addr)) { 555 if (__kernel_text_address(addr)) {
526 callchain_store(entry, addr); 556 perf_callchain_store(entry, addr);
527 if (entry->nr >= PERF_MAX_STACK_DEPTH) 557 if (entry->nr >= PERF_MAX_STACK_DEPTH)
528 break; 558 break;
529 } 559 }
530 } 560 }
531} 561}
532 562
533static void 563void perf_callchain_kernel(struct perf_callchain_entry *entry,
534perf_callchain_kernel(struct pt_regs *regs, 564 struct pt_regs *regs)
535 struct perf_callchain_entry *entry)
536{ 565{
537 unsigned long sp = regs->regs[29]; 566 unsigned long sp = regs->regs[29];
538#ifdef CONFIG_KALLSYMS 567#ifdef CONFIG_KALLSYMS
539 unsigned long ra = regs->regs[31]; 568 unsigned long ra = regs->regs[31];
540 unsigned long pc = regs->cp0_epc; 569 unsigned long pc = regs->cp0_epc;
541 570
542 callchain_store(entry, PERF_CONTEXT_KERNEL);
543 if (raw_show_trace || !__kernel_text_address(pc)) { 571 if (raw_show_trace || !__kernel_text_address(pc)) {
544 unsigned long stack_page = 572 unsigned long stack_page =
545 (unsigned long)task_stack_page(current); 573 (unsigned long)task_stack_page(current);
@@ -549,53 +577,12 @@ perf_callchain_kernel(struct pt_regs *regs,
549 return; 577 return;
550 } 578 }
551 do { 579 do {
552 callchain_store(entry, pc); 580 perf_callchain_store(entry, pc);
553 if (entry->nr >= PERF_MAX_STACK_DEPTH) 581 if (entry->nr >= PERF_MAX_STACK_DEPTH)
554 break; 582 break;
555 pc = unwind_stack(current, &sp, pc, &ra); 583 pc = unwind_stack(current, &sp, pc, &ra);
556 } while (pc); 584 } while (pc);
557#else 585#else
558 callchain_store(entry, PERF_CONTEXT_KERNEL);
559 save_raw_perf_callchain(entry, sp); 586 save_raw_perf_callchain(entry, sp);
560#endif 587#endif
561} 588}
562
563static void
564perf_do_callchain(struct pt_regs *regs,
565 struct perf_callchain_entry *entry)
566{
567 int is_user;
568
569 if (!regs)
570 return;
571
572 is_user = user_mode(regs);
573
574 if (!current || !current->pid)
575 return;
576
577 if (is_user && current->state != TASK_RUNNING)
578 return;
579
580 if (!is_user) {
581 perf_callchain_kernel(regs, entry);
582 if (current->mm)
583 regs = task_pt_regs(current);
584 else
585 regs = NULL;
586 }
587 if (regs)
588 perf_callchain_user(regs, entry);
589}
590
591static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
592
593struct perf_callchain_entry *
594perf_callchain(struct pt_regs *regs)
595{
596 struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
597
598 entry->nr = 0;
599 perf_do_callchain(regs, entry);
600 return entry;
601}
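
As a reading aid for the large perf_event.c hunk above (a summary drawn from the hunk itself, not additional patch content): the driver moves from the old global hw_perf_event_init()/hw_perf_enable()/hw_perf_disable() entry points and the per-event enable/disable/unthrottle methods to the per-PMU struct pmu callbacks, which are registered at the end of init_hw_perf_events(). Roughly:

/* Old MIPS perf glue (removed)       New struct pmu callback (added)
 * hw_perf_event_init()           ->  .event_init  = mipspmu_event_init
 * mipspmu_enable(event)          ->  .add         = mipspmu_add (PERF_EF_START -> .start)
 * mipspmu_disable(event)         ->  .del         = mipspmu_del (stops and updates the count)
 * mipspmu_unthrottle(event)      ->  .start       = mipspmu_start
 * hw_perf_enable()               ->  .pmu_enable  = mipspmu_enable
 * hw_perf_disable()              ->  .pmu_disable = mipspmu_disable
 *
 * The PMU is then made visible to the core with:
 *	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
 */
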
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 183e0d226669..d9a7db78ed62 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -696,7 +696,7 @@ static int mipsxx_pmu_handle_shared_irq(void)
696 * interrupt, not NMI. 696 * interrupt, not NMI.
697 */ 697 */
698 if (handled == IRQ_HANDLED) 698 if (handled == IRQ_HANDLED)
699 perf_event_do_pending(); 699 irq_work_run();
700 700
701#ifdef CONFIG_MIPS_MT_SMP 701#ifdef CONFIG_MIPS_MT_SMP
702 read_unlock(&pmuint_rwlock); 702 read_unlock(&pmuint_rwlock);
@@ -1045,6 +1045,8 @@ init_hw_perf_events(void)
1045 "CPU, irq %d%s\n", mipspmu->name, counters, irq, 1045 "CPU, irq %d%s\n", mipspmu->name, counters, irq,
1046 irq < 0 ? " (share with timer interrupt)" : ""); 1046 irq < 0 ? " (share with timer interrupt)" : "");
1047 1047
1048 perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
1049
1048 return 0; 1050 return 0;
1049} 1051}
1050early_initcall(init_hw_perf_events); 1052early_initcall(init_hw_perf_events);
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 5922342bca39..dbbe0ce48d89 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -84,7 +84,7 @@ static int protected_save_fp_context(struct sigcontext __user *sc)
84 84
85static int protected_restore_fp_context(struct sigcontext __user *sc) 85static int protected_restore_fp_context(struct sigcontext __user *sc)
86{ 86{
87 int err, tmp; 87 int err, tmp __maybe_unused;
88 while (1) { 88 while (1) {
89 lock_fpu_owner(); 89 lock_fpu_owner();
90 own_fpu_inatomic(0); 90 own_fpu_inatomic(0);
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index a0ed0e052b2e..aae986613795 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -115,7 +115,7 @@ static int protected_save_fp_context32(struct sigcontext32 __user *sc)
115 115
116static int protected_restore_fp_context32(struct sigcontext32 __user *sc) 116static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
117{ 117{
118 int err, tmp; 118 int err, tmp __maybe_unused;
119 while (1) { 119 while (1) {
120 lock_fpu_owner(); 120 lock_fpu_owner();
121 own_fpu_inatomic(0); 121 own_fpu_inatomic(0);
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 383aeb95cb49..32a256101082 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -193,6 +193,22 @@ void __devinit smp_prepare_boot_cpu(void)
193 */ 193 */
194static struct task_struct *cpu_idle_thread[NR_CPUS]; 194static struct task_struct *cpu_idle_thread[NR_CPUS];
195 195
196struct create_idle {
197 struct work_struct work;
198 struct task_struct *idle;
199 struct completion done;
200 int cpu;
201};
202
203static void __cpuinit do_fork_idle(struct work_struct *work)
204{
205 struct create_idle *c_idle =
206 container_of(work, struct create_idle, work);
207
208 c_idle->idle = fork_idle(c_idle->cpu);
209 complete(&c_idle->done);
210}
211
196int __cpuinit __cpu_up(unsigned int cpu) 212int __cpuinit __cpu_up(unsigned int cpu)
197{ 213{
198 struct task_struct *idle; 214 struct task_struct *idle;
@@ -203,8 +219,19 @@ int __cpuinit __cpu_up(unsigned int cpu)
203 * Linux can schedule processes on this slave. 219 * Linux can schedule processes on this slave.
204 */ 220 */
205 if (!cpu_idle_thread[cpu]) { 221 if (!cpu_idle_thread[cpu]) {
206 idle = fork_idle(cpu); 222 /*
207 cpu_idle_thread[cpu] = idle; 223 * Schedule work item to avoid forking user task
224 * Ported from arch/x86/kernel/smpboot.c
225 */
226 struct create_idle c_idle = {
227 .cpu = cpu,
228 .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
229 };
230
231 INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
232 schedule_work(&c_idle.work);
233 wait_for_completion(&c_idle.done);
234 idle = cpu_idle_thread[cpu] = c_idle.idle;
208 235
209 if (IS_ERR(idle)) 236 if (IS_ERR(idle))
210 panic(KERN_ERR "Fork failed for CPU %d", cpu); 237 panic(KERN_ERR "Fork failed for CPU %d", cpu);
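
The __cpu_up() change above borrows the x86 trick of forking the idle thread from a workqueue, so the idle task does not inherit state from the (possibly user) task that initiates CPU hotplug. A hedged sketch of the on-stack work plus completion pattern it relies on, with placeholder names, is:

/* Sketch only; my_request/my_worker are placeholders, not from the patch. */
#include <linux/workqueue.h>
#include <linux/completion.h>

struct my_request {
	struct work_struct work;
	struct completion done;
	int result;
};

static void my_worker(struct work_struct *work)
{
	struct my_request *req = container_of(work, struct my_request, work);

	req->result = 42;		/* runs in a kworker/keventd thread */
	complete(&req->done);		/* wake up the submitter */
}

static int submit_and_wait(void)
{
	struct my_request req = {
		.done = COMPLETION_INITIALIZER_ONSTACK(req.done),
	};

	INIT_WORK_ONSTACK(&req.work, my_worker);
	schedule_work(&req.work);
	wait_for_completion(&req.done);	/* req stays valid on our stack until here */
	return req.result;
}
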
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 1dc6edff45e0..58beabf50b3c 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -383,12 +383,11 @@ save_static_function(sys_sysmips);
383static int __used noinline 383static int __used noinline
384_sys_sysmips(nabi_no_regargs struct pt_regs regs) 384_sys_sysmips(nabi_no_regargs struct pt_regs regs)
385{ 385{
386 long cmd, arg1, arg2, arg3; 386 long cmd, arg1, arg2;
387 387
388 cmd = regs.regs[4]; 388 cmd = regs.regs[4];
389 arg1 = regs.regs[5]; 389 arg1 = regs.regs[5];
390 arg2 = regs.regs[6]; 390 arg2 = regs.regs[6];
391 arg3 = regs.regs[7];
392 391
393 switch (cmd) { 392 switch (cmd) {
394 case MIPS_ATOMIC_SET: 393 case MIPS_ATOMIC_SET:
@@ -405,7 +404,7 @@ _sys_sysmips(nabi_no_regargs struct pt_regs regs)
405 if (arg1 & 2) 404 if (arg1 & 2)
406 set_thread_flag(TIF_LOGADE); 405 set_thread_flag(TIF_LOGADE);
407 else 406 else
408 clear_thread_flag(TIF_FIXADE); 407 clear_thread_flag(TIF_LOGADE);
409 408
410 return 0; 409 return 0;
411 410
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 6a1fdfef8fde..ab52b7cf3b6b 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -148,9 +148,9 @@ struct {
148 spinlock_t tc_list_lock; 148 spinlock_t tc_list_lock;
149 struct list_head tc_list; /* Thread contexts */ 149 struct list_head tc_list; /* Thread contexts */
150} vpecontrol = { 150} vpecontrol = {
151 .vpe_list_lock = SPIN_LOCK_UNLOCKED, 151 .vpe_list_lock = __SPIN_LOCK_UNLOCKED(vpe_list_lock),
152 .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list), 152 .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list),
153 .tc_list_lock = SPIN_LOCK_UNLOCKED, 153 .tc_list_lock = __SPIN_LOCK_UNLOCKED(tc_list_lock),
154 .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list) 154 .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list)
155}; 155};
156 156
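
SPIN_LOCK_UNLOCKED is the legacy shared initializer and does not give each lock its own lockdep class, which is what the vpe.c hunk above fixes by switching to per-lock __SPIN_LOCK_UNLOCKED(name). A minimal sketch of the two idiomatic static initializations (shared_state is a placeholder, not from the patch):

/* Sketch only. */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(standalone_lock);	/* preferred for a bare spinlock */

static struct {
	spinlock_t lock;
	int value;
} shared_state = {
	.lock  = __SPIN_LOCK_UNLOCKED(shared_state.lock),
	.value = 0,
};
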
diff --git a/arch/mips/loongson/Kconfig b/arch/mips/loongson/Kconfig
index 6e1b77fec7ea..aca93eed8779 100644
--- a/arch/mips/loongson/Kconfig
+++ b/arch/mips/loongson/Kconfig
@@ -1,6 +1,7 @@
1if MACH_LOONGSON
2
1choice 3choice
2 prompt "Machine Type" 4 prompt "Machine Type"
3 depends on MACH_LOONGSON
4 5
5config LEMOTE_FULOONG2E 6config LEMOTE_FULOONG2E
6 bool "Lemote Fuloong(2e) mini-PC" 7 bool "Lemote Fuloong(2e) mini-PC"
@@ -87,3 +88,5 @@ config LOONGSON_UART_BASE
87config LOONGSON_MC146818 88config LOONGSON_MC146818
88 bool 89 bool
89 default n 90 default n
91
92endif # MACH_LOONGSON
diff --git a/arch/mips/loongson/common/cmdline.c b/arch/mips/loongson/common/cmdline.c
index 1a06defc4f7f..353e1d2e41a5 100644
--- a/arch/mips/loongson/common/cmdline.c
+++ b/arch/mips/loongson/common/cmdline.c
@@ -44,10 +44,5 @@ void __init prom_init_cmdline(void)
44 strcat(arcs_cmdline, " "); 44 strcat(arcs_cmdline, " ");
45 } 45 }
46 46
47 if ((strstr(arcs_cmdline, "console=")) == NULL)
48 strcat(arcs_cmdline, " console=ttyS0,115200");
49 if ((strstr(arcs_cmdline, "root=")) == NULL)
50 strcat(arcs_cmdline, " root=/dev/hda1");
51
52 prom_init_machtype(); 47 prom_init_machtype();
53} 48}
diff --git a/arch/mips/loongson/common/machtype.c b/arch/mips/loongson/common/machtype.c
index 81fbe6b73f91..2efd5d9dee27 100644
--- a/arch/mips/loongson/common/machtype.c
+++ b/arch/mips/loongson/common/machtype.c
@@ -41,7 +41,7 @@ void __weak __init mach_prom_init_machtype(void)
41 41
42void __init prom_init_machtype(void) 42void __init prom_init_machtype(void)
43{ 43{
44 char *p, str[MACHTYPE_LEN]; 44 char *p, str[MACHTYPE_LEN + 1];
45 int machtype = MACH_LEMOTE_FL2E; 45 int machtype = MACH_LEMOTE_FL2E;
46 46
47 mips_machtype = LOONGSON_MACHTYPE; 47 mips_machtype = LOONGSON_MACHTYPE;
@@ -53,6 +53,7 @@ void __init prom_init_machtype(void)
53 } 53 }
54 p += strlen("machtype="); 54 p += strlen("machtype=");
55 strncpy(str, p, MACHTYPE_LEN); 55 strncpy(str, p, MACHTYPE_LEN);
56 str[MACHTYPE_LEN] = '\0';
56 p = strstr(str, " "); 57 p = strstr(str, " ");
57 if (p) 58 if (p)
58 *p = '\0'; 59 *p = '\0';
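
The machtype.c fix matters because strncpy() does not NUL-terminate the destination when the source is at least MACHTYPE_LEN characters long, so the later strstr() could read past the buffer. A standalone illustration follows; the MACHTYPE_LEN value of 50 is assumed for the example only and is not taken from the tree.

/* Illustration only; 50 is an assumed MACHTYPE_LEN. */
#include <stdio.h>
#include <string.h>

#define MACHTYPE_LEN 50

int main(void)
{
	const char *cmdline_tail =
		"lemote-yeeloong-2f-8.9inches console=tty some more trailing arguments";
	char str[MACHTYPE_LEN + 1];		/* one extra byte for the terminator */

	strncpy(str, cmdline_tail, MACHTYPE_LEN);	/* may copy MACHTYPE_LEN bytes, no '\0' */
	str[MACHTYPE_LEN] = '\0';			/* the fix: terminate explicitly */
	printf("machtype string: %s\n", str);
	return 0;
}
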
diff --git a/arch/mips/math-emu/ieee754int.h b/arch/mips/math-emu/ieee754int.h
index 2701d9500959..2a7d43f4f161 100644
--- a/arch/mips/math-emu/ieee754int.h
+++ b/arch/mips/math-emu/ieee754int.h
@@ -70,7 +70,7 @@
70 70
71 71
72#define COMPXSP \ 72#define COMPXSP \
73 unsigned xm; int xe; int xs; int xc 73 unsigned xm; int xe; int xs __maybe_unused; int xc
74 74
75#define COMPYSP \ 75#define COMPYSP \
76 unsigned ym; int ye; int ys; int yc 76 unsigned ym; int ye; int ys; int yc
@@ -104,7 +104,7 @@
104 104
105 105
106#define COMPXDP \ 106#define COMPXDP \
107u64 xm; int xe; int xs; int xc 107u64 xm; int xe; int xs __maybe_unused; int xc
108 108
109#define COMPYDP \ 109#define COMPYDP \
110u64 ym; int ye; int ys; int yc 110u64 ym; int ye; int ys; int yc
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 2efcbd24c82f..279599e9a779 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -324,7 +324,7 @@ int page_is_ram(unsigned long pagenr)
324void __init paging_init(void) 324void __init paging_init(void)
325{ 325{
326 unsigned long max_zone_pfns[MAX_NR_ZONES]; 326 unsigned long max_zone_pfns[MAX_NR_ZONES];
327 unsigned long lastpfn; 327 unsigned long lastpfn __maybe_unused;
328 328
329 pagetable_init(); 329 pagetable_init();
330 330
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 083d3412d0bc..04f9e17db9d0 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -109,6 +109,8 @@ static bool scratchpad_available(void)
109static int scratchpad_offset(int i) 109static int scratchpad_offset(int i)
110{ 110{
111 BUG(); 111 BUG();
 112 /* Really unreachable, but evidently some GCC versions want this. */
113 return 0;
112} 114}
113#endif 115#endif
114/* 116/*
diff --git a/arch/mips/pci/ops-pmcmsp.c b/arch/mips/pci/ops-pmcmsp.c
index b7c03d80c88c..68798f869c0f 100644
--- a/arch/mips/pci/ops-pmcmsp.c
+++ b/arch/mips/pci/ops-pmcmsp.c
@@ -308,7 +308,7 @@ static struct resource pci_mem_resource = {
308 * RETURNS: PCIBIOS_SUCCESSFUL - success 308 * RETURNS: PCIBIOS_SUCCESSFUL - success
309 * 309 *
310 ****************************************************************************/ 310 ****************************************************************************/
311static int bpci_interrupt(int irq, void *dev_id) 311static irqreturn_t bpci_interrupt(int irq, void *dev_id)
312{ 312{
313 struct msp_pci_regs *preg = (void *)PCI_BASE_REG; 313 struct msp_pci_regs *preg = (void *)PCI_BASE_REG;
314 unsigned int stat = preg->if_status; 314 unsigned int stat = preg->if_status;
@@ -326,7 +326,7 @@ static int bpci_interrupt(int irq, void *dev_id)
326 /* write to clear all asserted interrupts */ 326 /* write to clear all asserted interrupts */
327 preg->if_status = stat; 327 preg->if_status = stat;
328 328
329 return PCIBIOS_SUCCESSFUL; 329 return IRQ_HANDLED;
330} 330}
331 331
332/***************************************************************************** 332/*****************************************************************************
diff --git a/arch/mips/pmc-sierra/Kconfig b/arch/mips/pmc-sierra/Kconfig
index c139988bb85d..8d798497c614 100644
--- a/arch/mips/pmc-sierra/Kconfig
+++ b/arch/mips/pmc-sierra/Kconfig
@@ -4,15 +4,11 @@ choice
4 4
5config PMC_MSP4200_EVAL 5config PMC_MSP4200_EVAL
6 bool "PMC-Sierra MSP4200 Eval Board" 6 bool "PMC-Sierra MSP4200 Eval Board"
7 select CEVT_R4K
8 select CSRC_R4K
9 select IRQ_MSP_SLP 7 select IRQ_MSP_SLP
10 select HW_HAS_PCI 8 select HW_HAS_PCI
11 9
12config PMC_MSP4200_GW 10config PMC_MSP4200_GW
13 bool "PMC-Sierra MSP4200 VoIP Gateway" 11 bool "PMC-Sierra MSP4200 VoIP Gateway"
14 select CEVT_R4K
15 select CSRC_R4K
16 select IRQ_MSP_SLP 12 select IRQ_MSP_SLP
17 select HW_HAS_PCI 13 select HW_HAS_PCI
18 14
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_time.c b/arch/mips/pmc-sierra/msp71xx/msp_time.c
index cca64e15f57f..01df84ce31e2 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_time.c
+++ b/arch/mips/pmc-sierra/msp71xx/msp_time.c
@@ -81,7 +81,7 @@ void __init plat_time_init(void)
81 mips_hpt_frequency = cpu_rate/2; 81 mips_hpt_frequency = cpu_rate/2;
82} 82}
83 83
84unsigned int __init get_c0_compare_int(void) 84unsigned int __cpuinit get_c0_compare_int(void)
85{ 85{
86 return MSP_INT_VPE0_TIMER; 86 return MSP_INT_VPE0_TIMER;
87} 87}
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h
index 92d2f9298e38..9d773a639513 100644
--- a/arch/mn10300/include/asm/atomic.h
+++ b/arch/mn10300/include/asm/atomic.h
@@ -139,7 +139,7 @@ static inline unsigned long __cmpxchg(volatile unsigned long *m,
139 * Atomically reads the value of @v. Note that the guaranteed 139 * Atomically reads the value of @v. Note that the guaranteed
140 * useful range of an atomic_t is only 24 bits. 140 * useful range of an atomic_t is only 24 bits.
141 */ 141 */
142#define atomic_read(v) ((v)->counter) 142#define atomic_read(v) (ACCESS_ONCE((v)->counter))
143 143
144/** 144/**
145 * atomic_set - set atomic variable 145 * atomic_set - set atomic variable
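
The atomic_read() change forces a real memory load on every call. ACCESS_ONCE() is conventionally defined along the lines sketched below (see linux/compiler.h for the real definition); without the volatile cast the compiler may legitimately cache (v)->counter in a register across a polling loop.

/* Sketch of the conventional definition, for illustration only. */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

/*
 * Failure mode this prevents: without ACCESS_ONCE the load below can be
 * hoisted out of the loop, turning it into a spin on a stale register value.
 *
 *	while (atomic_read(&v) != DONE)
 *		cpu_relax();
 */
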
diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h
index 679dee0bbd08..3d6e60dad9d9 100644
--- a/arch/mn10300/include/asm/uaccess.h
+++ b/arch/mn10300/include/asm/uaccess.h
@@ -160,9 +160,10 @@ struct __large_struct { unsigned long buf[100]; };
160 160
161#define __get_user_check(x, ptr, size) \ 161#define __get_user_check(x, ptr, size) \
162({ \ 162({ \
163 const __typeof__(ptr) __guc_ptr = (ptr); \
163 int _e; \ 164 int _e; \
164 if (likely(__access_ok((unsigned long) (ptr), (size)))) \ 165 if (likely(__access_ok((unsigned long) __guc_ptr, (size)))) \
165 _e = __get_user_nocheck((x), (ptr), (size)); \ 166 _e = __get_user_nocheck((x), __guc_ptr, (size)); \
166 else { \ 167 else { \
167 _e = -EFAULT; \ 168 _e = -EFAULT; \
168 (x) = (__typeof__(x))0; \ 169 (x) = (__typeof__(x))0; \
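
The __get_user_check() fix introduces __guc_ptr so the ptr argument is expanded exactly once; previously an argument with side effects, such as __get_user_check(x, p++, size), would be evaluated more than once. A small userspace demonstration of the general macro pitfall (the CHECKED_DEREF_* names are made up for the example):

/* Illustration only; relies on the GNU statement-expression extension. */
#include <stdio.h>

/* expands its argument twice */
#define CHECKED_DEREF_BAD(p)	((p) ? *(p) : 0)

/* expands its argument exactly once, like the __guc_ptr temporary above */
#define CHECKED_DEREF_GOOD(p)	({ __typeof__(p) _p = (p); _p ? *_p : 0; })

int main(void)
{
	int data[] = { 1, 2, 3, 4 };
	int *cursor = data;

	int a = CHECKED_DEREF_BAD(cursor++);	/* cursor++ happens twice */
	int b = CHECKED_DEREF_GOOD(cursor++);	/* cursor++ happens once */

	printf("a=%d b=%d consumed=%td\n", a, b, cursor - data);
	return 0;
}
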
diff --git a/arch/mn10300/mm/cache-inv-icache.c b/arch/mn10300/mm/cache-inv-icache.c
index a8933a60b2d4..a6b63dde603d 100644
--- a/arch/mn10300/mm/cache-inv-icache.c
+++ b/arch/mn10300/mm/cache-inv-icache.c
@@ -69,7 +69,7 @@ static void flush_icache_page_range(unsigned long start, unsigned long end)
69 69
70 /* invalidate the icache coverage on that region */ 70 /* invalidate the icache coverage on that region */
71 mn10300_local_icache_inv_range2(addr + off, size); 71 mn10300_local_icache_inv_range2(addr + off, size);
72 smp_cache_call(SMP_ICACHE_INV_FLUSH_RANGE, start, end); 72 smp_cache_call(SMP_ICACHE_INV_RANGE, start, end);
73} 73}
74 74
75/** 75/**
@@ -101,7 +101,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
101 * directly */ 101 * directly */
102 start_page = (start >= 0x80000000UL) ? start : 0x80000000UL; 102 start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
103 mn10300_icache_inv_range(start_page, end); 103 mn10300_icache_inv_range(start_page, end);
104 smp_cache_call(SMP_ICACHE_INV_FLUSH_RANGE, start, end); 104 smp_cache_call(SMP_ICACHE_INV_RANGE, start, end);
105 if (start_page == start) 105 if (start_page == start)
106 goto done; 106 goto done;
107 end = start_page; 107 end = start_page;
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 380d48bacd16..26b8c807f8f1 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -33,9 +33,25 @@
33// 33//
34//---------------------------------------------------------------------------- 34//----------------------------------------------------------------------------
35#include <linux/cache.h> 35#include <linux/cache.h>
36#include <linux/threads.h>
36#include <asm/types.h> 37#include <asm/types.h>
37#include <asm/mmu.h> 38#include <asm/mmu.h>
38 39
40/*
41 * We only have to have statically allocated lppaca structs on
42 * legacy iSeries, which supports at most 64 cpus.
43 */
44#ifdef CONFIG_PPC_ISERIES
45#if NR_CPUS < 64
46#define NR_LPPACAS NR_CPUS
47#else
48#define NR_LPPACAS 64
49#endif
50#else /* not iSeries */
51#define NR_LPPACAS 1
52#endif
53
54
39/* The Hypervisor barfs if the lppaca crosses a page boundary. A 1k 55/* The Hypervisor barfs if the lppaca crosses a page boundary. A 1k
40 * alignment is sufficient to prevent this */ 56 * alignment is sufficient to prevent this */
41struct lppaca { 57struct lppaca {
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 991d5998d6be..fe56a23e1ff0 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -240,6 +240,12 @@ struct machdep_calls {
240 * claims to support kexec. 240 * claims to support kexec.
241 */ 241 */
242 int (*machine_kexec_prepare)(struct kimage *image); 242 int (*machine_kexec_prepare)(struct kimage *image);
243
244 /* Called to perform the _real_ kexec.
245 * Do NOT allocate memory or fail here. We are past the point of
246 * no return.
247 */
248 void (*machine_kexec)(struct kimage *image);
243#endif /* CONFIG_KEXEC */ 249#endif /* CONFIG_KEXEC */
244 250
245#ifdef CONFIG_SUSPEND 251#ifdef CONFIG_SUSPEND
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 49a170af8145..a5f8672eeff3 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -87,7 +87,10 @@ void machine_kexec(struct kimage *image)
87 87
88 save_ftrace_enabled = __ftrace_enabled_save(); 88 save_ftrace_enabled = __ftrace_enabled_save();
89 89
90 default_machine_kexec(image); 90 if (ppc_md.machine_kexec)
91 ppc_md.machine_kexec(image);
92 else
93 default_machine_kexec(image);
91 94
92 __ftrace_enabled_restore(save_ftrace_enabled); 95 __ftrace_enabled_restore(save_ftrace_enabled);
93 96
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index ebf9846f3c3b..f4adf89d7614 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -27,20 +27,6 @@ extern unsigned long __toc_start;
27#ifdef CONFIG_PPC_BOOK3S 27#ifdef CONFIG_PPC_BOOK3S
28 28
29/* 29/*
30 * We only have to have statically allocated lppaca structs on
31 * legacy iSeries, which supports at most 64 cpus.
32 */
33#ifdef CONFIG_PPC_ISERIES
34#if NR_CPUS < 64
35#define NR_LPPACAS NR_CPUS
36#else
37#define NR_LPPACAS 64
38#endif
39#else /* not iSeries */
40#define NR_LPPACAS 1
41#endif
42
43/*
44 * The structure which the hypervisor knows about - this structure 30 * The structure which the hypervisor knows about - this structure
45 * should not cross a page boundary. The vpa_init/register_vpa call 31 * should not cross a page boundary. The vpa_init/register_vpa call
46 * is now known to fail if the lppaca structure crosses a page 32 * is now known to fail if the lppaca structure crosses a page
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 7a1d5cb76932..8303a6c65ef7 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -353,6 +353,7 @@ static void switch_booke_debug_regs(struct thread_struct *new_thread)
353 prime_debug_regs(new_thread); 353 prime_debug_regs(new_thread);
354} 354}
355#else /* !CONFIG_PPC_ADV_DEBUG_REGS */ 355#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
356#ifndef CONFIG_HAVE_HW_BREAKPOINT
356static void set_debug_reg_defaults(struct thread_struct *thread) 357static void set_debug_reg_defaults(struct thread_struct *thread)
357{ 358{
358 if (thread->dabr) { 359 if (thread->dabr) {
@@ -360,6 +361,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
360 set_dabr(0); 361 set_dabr(0);
361 } 362 }
362} 363}
364#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
363#endif /* CONFIG_PPC_ADV_DEBUG_REGS */ 365#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
364 366
365int set_dabr(unsigned long dabr) 367int set_dabr(unsigned long dabr)
@@ -670,11 +672,11 @@ void flush_thread(void)
670{ 672{
671 discard_lazy_cpu_state(); 673 discard_lazy_cpu_state();
672 674
673#ifdef CONFIG_HAVE_HW_BREAKPOINTS 675#ifdef CONFIG_HAVE_HW_BREAKPOINT
674 flush_ptrace_hw_breakpoint(current); 676 flush_ptrace_hw_breakpoint(current);
675#else /* CONFIG_HAVE_HW_BREAKPOINTS */ 677#else /* CONFIG_HAVE_HW_BREAKPOINT */
676 set_debug_reg_defaults(&current->thread); 678 set_debug_reg_defaults(&current->thread);
677#endif /* CONFIG_HAVE_HW_BREAKPOINTS */ 679#endif /* CONFIG_HAVE_HW_BREAKPOINT */
678} 680}
679 681
680void 682void
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index fd4812329570..0dc95c0aa3be 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1516,7 +1516,8 @@ int start_topology_update(void)
1516{ 1516{
1517 int rc = 0; 1517 int rc = 0;
1518 1518
1519 if (firmware_has_feature(FW_FEATURE_VPHN) && 1519 /* Disabled until races with load balancing are fixed */
1520 if (0 && firmware_has_feature(FW_FEATURE_VPHN) &&
1520 get_lppaca()->shared_proc) { 1521 get_lppaca()->shared_proc) {
1521 vphn_enabled = 1; 1522 vphn_enabled = 1;
1522 setup_cpu_associativity_change_counters(); 1523 setup_cpu_associativity_change_counters();
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 1ec06576f619..c14d09f614f3 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -38,13 +38,11 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
 38 * needs to be flushed. This function will either perform the flush 38 * needs to be flushed. This function will either perform the flush
39 * immediately or will batch it up if the current CPU has an active 39 * immediately or will batch it up if the current CPU has an active
40 * batch on it. 40 * batch on it.
41 *
42 * Must be called from within some kind of spinlock/non-preempt region...
43 */ 41 */
44void hpte_need_flush(struct mm_struct *mm, unsigned long addr, 42void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
45 pte_t *ptep, unsigned long pte, int huge) 43 pte_t *ptep, unsigned long pte, int huge)
46{ 44{
47 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); 45 struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
48 unsigned long vsid, vaddr; 46 unsigned long vsid, vaddr;
49 unsigned int psize; 47 unsigned int psize;
50 int ssize; 48 int ssize;
@@ -99,6 +97,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
99 */ 97 */
100 if (!batch->active) { 98 if (!batch->active) {
101 flush_hash_page(vaddr, rpte, psize, ssize, 0); 99 flush_hash_page(vaddr, rpte, psize, ssize, 0);
100 put_cpu_var(ppc64_tlb_batch);
102 return; 101 return;
103 } 102 }
104 103
@@ -127,6 +126,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
127 batch->index = ++i; 126 batch->index = ++i;
128 if (i >= PPC64_TLB_BATCH_NR) 127 if (i >= PPC64_TLB_BATCH_NR)
129 __flush_tlb_pending(batch); 128 __flush_tlb_pending(batch);
129 put_cpu_var(ppc64_tlb_batch);
130} 130}
131 131
132/* 132/*
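
hpte_need_flush() may now be called with preemption enabled, so the hunk above replaces __get_cpu_var() with a get_cpu_var()/put_cpu_var() pair; note that every return path, including the early one, has to drop the reference. A minimal sketch of the pattern with a placeholder per-CPU variable:

/* Sketch only; pending_count is a placeholder, not from the patch. */
#include <linux/percpu.h>

static DEFINE_PER_CPU(int, pending_count);

static void note_pending(int n)
{
	int *count = &get_cpu_var(pending_count);	/* disables preemption */

	if (n <= 0) {
		put_cpu_var(pending_count);		/* early exit must still re-enable it */
		return;
	}

	*count += n;
	put_cpu_var(pending_count);			/* re-enables preemption */
}
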
diff --git a/arch/powerpc/platforms/iseries/dt.c b/arch/powerpc/platforms/iseries/dt.c
index fdb7384c0c4f..f0491cc28900 100644
--- a/arch/powerpc/platforms/iseries/dt.c
+++ b/arch/powerpc/platforms/iseries/dt.c
@@ -242,8 +242,8 @@ static void __init dt_cpus(struct iseries_flat_dt *dt)
242 pft_size[0] = 0; /* NUMA CEC cookie, 0 for non NUMA */ 242 pft_size[0] = 0; /* NUMA CEC cookie, 0 for non NUMA */
243 pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE); 243 pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE);
244 244
245 for (i = 0; i < NR_CPUS; i++) { 245 for (i = 0; i < NR_LPPACAS; i++) {
246 if (lppaca_of(i).dyn_proc_status >= 2) 246 if (lppaca[i].dyn_proc_status >= 2)
247 continue; 247 continue;
248 248
249 snprintf(p, 32 - (p - buf), "@%d", i); 249 snprintf(p, 32 - (p - buf), "@%d", i);
@@ -251,7 +251,7 @@ static void __init dt_cpus(struct iseries_flat_dt *dt)
251 251
252 dt_prop_str(dt, "device_type", device_type_cpu); 252 dt_prop_str(dt, "device_type", device_type_cpu);
253 253
254 index = lppaca_of(i).dyn_hv_phys_proc_index; 254 index = lppaca[i].dyn_hv_phys_proc_index;
255 d = &xIoHriProcessorVpd[index]; 255 d = &xIoHriProcessorVpd[index];
256 256
257 dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024); 257 dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024);
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index b0863410517f..2946ae10fbfd 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -680,6 +680,7 @@ void * __init iSeries_early_setup(void)
680 * on but calling this function multiple times is fine. 680 * on but calling this function multiple times is fine.
681 */ 681 */
682 identify_cpu(0, mfspr(SPRN_PVR)); 682 identify_cpu(0, mfspr(SPRN_PVR));
683 initialise_paca(&boot_paca, 0);
683 684
684 powerpc_firmware_features |= FW_FEATURE_ISERIES; 685 powerpc_firmware_features |= FW_FEATURE_ISERIES;
685 powerpc_firmware_features |= FW_FEATURE_LPAR; 686 powerpc_firmware_features |= FW_FEATURE_LPAR;
diff --git a/arch/sh/include/asm/sections.h b/arch/sh/include/asm/sections.h
index a78701da775b..4a5350037c8f 100644
--- a/arch/sh/include/asm/sections.h
+++ b/arch/sh/include/asm/sections.h
@@ -3,7 +3,7 @@
3 3
4#include <asm-generic/sections.h> 4#include <asm-generic/sections.h>
5 5
6extern void __nosave_begin, __nosave_end; 6extern long __nosave_begin, __nosave_end;
7extern long __machvec_start, __machvec_end; 7extern long __machvec_start, __machvec_end;
8extern char __uncached_start, __uncached_end; 8extern char __uncached_start, __uncached_end;
9extern char _ebss[]; 9extern char _ebss[];
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
index 672944f5b19c..e53b4b38bd11 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
@@ -14,7 +14,7 @@
14#include <linux/io.h> 14#include <linux/io.h>
15#include <linux/sh_timer.h> 15#include <linux/sh_timer.h>
16#include <linux/serial_sci.h> 16#include <linux/serial_sci.h>
17#include <asm/machtypes.h> 17#include <generated/machtypes.h>
18 18
19static struct resource rtc_resources[] = { 19static struct resource rtc_resources[] = {
20 [0] = { 20 [0] = {
@@ -255,12 +255,17 @@ static struct platform_device *sh7750_early_devices[] __initdata = {
255 255
256void __init plat_early_device_setup(void) 256void __init plat_early_device_setup(void)
257{ 257{
258 struct platform_device *dev[1];
259
258 if (mach_is_rts7751r2d()) { 260 if (mach_is_rts7751r2d()) {
259 scif_platform_data.scscr |= SCSCR_CKE1; 261 scif_platform_data.scscr |= SCSCR_CKE1;
260 early_platform_add_devices(&scif_device, 1); 262 dev[0] = &scif_device;
263 early_platform_add_devices(dev, 1);
261 } else { 264 } else {
262 early_platform_add_devices(&sci_device, 1); 265 dev[0] = &sci_device;
263 early_platform_add_devices(&scif_device, 1); 266 early_platform_add_devices(dev, 1);
267 dev[0] = &scif_device;
268 early_platform_add_devices(dev, 1);
264 } 269 }
265 270
266 early_platform_add_devices(sh7750_early_devices, 271 early_platform_add_devices(sh7750_early_devices,
diff --git a/arch/sh/lib/delay.c b/arch/sh/lib/delay.c
index faa8f86c0db4..0901b2f14e15 100644
--- a/arch/sh/lib/delay.c
+++ b/arch/sh/lib/delay.c
@@ -10,6 +10,16 @@
10void __delay(unsigned long loops) 10void __delay(unsigned long loops)
11{ 11{
12 __asm__ __volatile__( 12 __asm__ __volatile__(
13 /*
14 * ST40-300 appears to have an issue with this code,
15 * normally taking two cycles each loop, as with all
16 * other SH variants. If however the branch and the
17 * delay slot straddle an 8 byte boundary, this increases
18 * to 3 cycles.
19 * This align directive ensures this doesn't occur.
20 */
21 ".balign 8\n\t"
22
13 "tst %0, %0\n\t" 23 "tst %0, %0\n\t"
14 "1:\t" 24 "1:\t"
15 "bf/s 1b\n\t" 25 "bf/s 1b\n\t"
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 88d3dc3d30d5..5a580ea04429 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -108,7 +108,8 @@ void copy_user_highpage(struct page *to, struct page *from,
108 kunmap_atomic(vfrom, KM_USER0); 108 kunmap_atomic(vfrom, KM_USER0);
109 } 109 }
110 110
111 if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK)) 111 if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
112 (vma->vm_flags & VM_EXEC))
112 __flush_purge_region(vto, PAGE_SIZE); 113 __flush_purge_region(vto, PAGE_SIZE);
113 114
114 kunmap_atomic(vto, KM_USER1); 115 kunmap_atomic(vto, KM_USER1);
diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
index 646aa78ba5fd..46a823882437 100644
--- a/arch/x86/boot/compressed/mkpiggy.c
+++ b/arch/x86/boot/compressed/mkpiggy.c
@@ -62,7 +62,12 @@ int main(int argc, char *argv[])
62 if (fseek(f, -4L, SEEK_END)) { 62 if (fseek(f, -4L, SEEK_END)) {
63 perror(argv[1]); 63 perror(argv[1]);
64 } 64 }
65 fread(&olen, sizeof olen, 1, f); 65
66 if (fread(&olen, sizeof(olen), 1, f) != 1) {
67 perror(argv[1]);
68 return 1;
69 }
70
66 ilen = ftell(f); 71 ilen = ftell(f);
67 olen = getle32(&olen); 72 olen = getle32(&olen);
68 fclose(f); 73 fclose(f);
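
fread() reports the number of complete items it read, and ignoring that return value is what the mkpiggy.c hunk fixes. A minimal standalone version of the corrected pattern, with a made-up helper name:

/* Sketch; read_trailing_u32() is a made-up helper, not part of mkpiggy.c. */
#include <stdio.h>
#include <stdint.h>

static int read_trailing_u32(const char *path, uint32_t *out)
{
	FILE *f = fopen(path, "rb");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fseek(f, -4L, SEEK_END) != 0 || fread(out, sizeof(*out), 1, f) != 1) {
		perror(path);
		fclose(f);
		return 1;
	}
	fclose(f);
	return 0;
}
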
diff --git a/arch/x86/include/asm/ce4100.h b/arch/x86/include/asm/ce4100.h
new file mode 100644
index 000000000000..e656ad8c0a2e
--- /dev/null
+++ b/arch/x86/include/asm/ce4100.h
@@ -0,0 +1,6 @@
1#ifndef _ASM_CE4100_H_
2#define _ASM_CE4100_H_
3
4int ce4100_pci_init(void);
5
6#endif
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 4d0dfa0d998e..43a18c77676d 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -36,6 +36,11 @@
36#define MSR_IA32_PERFCTR1 0x000000c2 36#define MSR_IA32_PERFCTR1 0x000000c2
37#define MSR_FSB_FREQ 0x000000cd 37#define MSR_FSB_FREQ 0x000000cd
38 38
39#define MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2
40#define NHM_C3_AUTO_DEMOTE (1UL << 25)
41#define NHM_C1_AUTO_DEMOTE (1UL << 26)
42#define ATM_LNC_C6_AUTO_DEMOTE (1UL << 25)
43
39#define MSR_MTRRcap 0x000000fe 44#define MSR_MTRRcap 0x000000fe
40#define MSR_IA32_BBL_CR_CTL 0x00000119 45#define MSR_IA32_BBL_CR_CTL 0x00000119
41 46
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index ce1d54c8a433..3e094af443c3 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -176,7 +176,7 @@ struct bau_msg_payload {
176struct bau_msg_header { 176struct bau_msg_header {
177 unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */ 177 unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */
178 /* bits 5:0 */ 178 /* bits 5:0 */
179 unsigned int base_dest_nodeid:15; /* nasid (pnode<<1) of */ 179 unsigned int base_dest_nodeid:15; /* nasid of the */
180 /* bits 20:6 */ /* first bit in uvhub map */ 180 /* bits 20:6 */ /* first bit in uvhub map */
181 unsigned int command:8; /* message type */ 181 unsigned int command:8; /* message type */
182 /* bits 28:21 */ 182 /* bits 28:21 */
diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c
index 13a389179514..452932d34730 100644
--- a/arch/x86/kernel/check.c
+++ b/arch/x86/kernel/check.c
@@ -106,8 +106,8 @@ void __init setup_bios_corruption_check(void)
106 addr += size; 106 addr += size;
107 } 107 }
108 108
109 printk(KERN_INFO "Scanning %d areas for low memory corruption\n", 109 if (num_scan_areas)
110 num_scan_areas); 110 printk(KERN_INFO "Scanning %d areas for low memory corruption\n", num_scan_areas);
111} 111}
112 112
113 113
@@ -143,12 +143,12 @@ static void check_corruption(struct work_struct *dummy)
143{ 143{
144 check_for_bios_corruption(); 144 check_for_bios_corruption();
145 schedule_delayed_work(&bios_check_work, 145 schedule_delayed_work(&bios_check_work,
146 round_jiffies_relative(corruption_check_period*HZ)); 146 round_jiffies_relative(corruption_check_period*HZ));
147} 147}
148 148
149static int start_periodic_check_for_corruption(void) 149static int start_periodic_check_for_corruption(void)
150{ 150{
151 if (!memory_corruption_check || corruption_check_period == 0) 151 if (!num_scan_areas || !memory_corruption_check || corruption_check_period == 0)
152 return 0; 152 return 0;
153 153
154 printk(KERN_INFO "Scanning for low memory corruption every %d seconds\n", 154 printk(KERN_INFO "Scanning for low memory corruption every %d seconds\n",
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index bd1cac747f67..52c93648e492 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -158,9 +158,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
158{ 158{
159 if (c->x86 == 0x06) { 159 if (c->x86 == 0x06) {
160 if (cpu_has(c, X86_FEATURE_EST)) 160 if (cpu_has(c, X86_FEATURE_EST))
161 printk(KERN_WARNING PFX "Warning: EST-capable CPU " 161 printk_once(KERN_WARNING PFX "Warning: EST-capable "
162 "detected. The acpi-cpufreq module offers " 162 "CPU detected. The acpi-cpufreq module offers "
163 "voltage scaling in addition of frequency " 163 "voltage scaling in addition to frequency "
164 "scaling. You should use that instead of " 164 "scaling. You should use that instead of "
165 "p4-clockmod, if possible.\n"); 165 "p4-clockmod, if possible.\n");
166 switch (c->x86_model) { 166 switch (c->x86_model) {
diff --git a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
index 4f6f679f2799..4a5a42b842ad 100644
--- a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
@@ -195,7 +195,7 @@ static unsigned int pcc_get_freq(unsigned int cpu)
195cmd_incomplete: 195cmd_incomplete:
196 iowrite16(0, &pcch_hdr->status); 196 iowrite16(0, &pcch_hdr->status);
197 spin_unlock(&pcc_lock); 197 spin_unlock(&pcc_lock);
198 return -EINVAL; 198 return 0;
199} 199}
200 200
201static int pcc_cpufreq_target(struct cpufreq_policy *policy, 201static int pcc_cpufreq_target(struct cpufreq_policy *policy,
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 35c7e65e59be..c567dec854f6 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1537,6 +1537,7 @@ static struct notifier_block cpb_nb = {
1537static int __cpuinit powernowk8_init(void) 1537static int __cpuinit powernowk8_init(void)
1538{ 1538{
1539 unsigned int i, supported_cpus = 0, cpu; 1539 unsigned int i, supported_cpus = 0, cpu;
1540 int rv;
1540 1541
1541 for_each_online_cpu(i) { 1542 for_each_online_cpu(i) {
1542 int rc; 1543 int rc;
@@ -1555,14 +1556,14 @@ static int __cpuinit powernowk8_init(void)
1555 1556
1556 cpb_capable = true; 1557 cpb_capable = true;
1557 1558
1558 register_cpu_notifier(&cpb_nb);
1559
1560 msrs = msrs_alloc(); 1559 msrs = msrs_alloc();
1561 if (!msrs) { 1560 if (!msrs) {
1562 printk(KERN_ERR "%s: Error allocating msrs!\n", __func__); 1561 printk(KERN_ERR "%s: Error allocating msrs!\n", __func__);
1563 return -ENOMEM; 1562 return -ENOMEM;
1564 } 1563 }
1565 1564
1565 register_cpu_notifier(&cpb_nb);
1566
1566 rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs); 1567 rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
1567 1568
1568 for_each_cpu(cpu, cpu_online_mask) { 1569 for_each_cpu(cpu, cpu_online_mask) {
@@ -1574,7 +1575,13 @@ static int __cpuinit powernowk8_init(void)
1574 (cpb_enabled ? "on" : "off")); 1575 (cpb_enabled ? "on" : "off"));
1575 } 1576 }
1576 1577
1577 return cpufreq_register_driver(&cpufreq_amd64_driver); 1578 rv = cpufreq_register_driver(&cpufreq_amd64_driver);
1579 if (rv < 0 && boot_cpu_has(X86_FEATURE_CPB)) {
1580 unregister_cpu_notifier(&cpb_nb);
1581 msrs_free(msrs);
1582 msrs = NULL;
1583 }
1584 return rv;
1578} 1585}
1579 1586
1580/* driver entry point for term */ 1587/* driver entry point for term */
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 7d90ceb882a4..20e3f8702d1e 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -229,15 +229,14 @@ void vmalloc_sync_all(void)
229 for (address = VMALLOC_START & PMD_MASK; 229 for (address = VMALLOC_START & PMD_MASK;
230 address >= TASK_SIZE && address < FIXADDR_TOP; 230 address >= TASK_SIZE && address < FIXADDR_TOP;
231 address += PMD_SIZE) { 231 address += PMD_SIZE) {
232
233 unsigned long flags;
234 struct page *page; 232 struct page *page;
235 233
236 spin_lock_irqsave(&pgd_lock, flags); 234 spin_lock(&pgd_lock);
237 list_for_each_entry(page, &pgd_list, lru) { 235 list_for_each_entry(page, &pgd_list, lru) {
238 spinlock_t *pgt_lock; 236 spinlock_t *pgt_lock;
239 pmd_t *ret; 237 pmd_t *ret;
240 238
239 /* the pgt_lock only for Xen */
241 pgt_lock = &pgd_page_get_mm(page)->page_table_lock; 240 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
242 241
243 spin_lock(pgt_lock); 242 spin_lock(pgt_lock);
@@ -247,7 +246,7 @@ void vmalloc_sync_all(void)
247 if (!ret) 246 if (!ret)
248 break; 247 break;
249 } 248 }
250 spin_unlock_irqrestore(&pgd_lock, flags); 249 spin_unlock(&pgd_lock);
251 } 250 }
252} 251}
253 252
@@ -828,6 +827,13 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
828 unsigned long address, unsigned int fault) 827 unsigned long address, unsigned int fault)
829{ 828{
830 if (fault & VM_FAULT_OOM) { 829 if (fault & VM_FAULT_OOM) {
830 /* Kernel mode? Handle exceptions or die: */
831 if (!(error_code & PF_USER)) {
832 up_read(&current->mm->mmap_sem);
833 no_context(regs, error_code, address);
834 return;
835 }
836
831 out_of_memory(regs, error_code, address); 837 out_of_memory(regs, error_code, address);
832 } else { 838 } else {
833 if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| 839 if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 71a59296af80..c14a5422e152 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -105,18 +105,18 @@ void sync_global_pgds(unsigned long start, unsigned long end)
105 105
106 for (address = start; address <= end; address += PGDIR_SIZE) { 106 for (address = start; address <= end; address += PGDIR_SIZE) {
107 const pgd_t *pgd_ref = pgd_offset_k(address); 107 const pgd_t *pgd_ref = pgd_offset_k(address);
108 unsigned long flags;
109 struct page *page; 108 struct page *page;
110 109
111 if (pgd_none(*pgd_ref)) 110 if (pgd_none(*pgd_ref))
112 continue; 111 continue;
113 112
114 spin_lock_irqsave(&pgd_lock, flags); 113 spin_lock(&pgd_lock);
115 list_for_each_entry(page, &pgd_list, lru) { 114 list_for_each_entry(page, &pgd_list, lru) {
116 pgd_t *pgd; 115 pgd_t *pgd;
117 spinlock_t *pgt_lock; 116 spinlock_t *pgt_lock;
118 117
119 pgd = (pgd_t *)page_address(page) + pgd_index(address); 118 pgd = (pgd_t *)page_address(page) + pgd_index(address);
119 /* the pgt_lock only for Xen */
120 pgt_lock = &pgd_page_get_mm(page)->page_table_lock; 120 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
121 spin_lock(pgt_lock); 121 spin_lock(pgt_lock);
122 122
@@ -128,7 +128,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
128 128
129 spin_unlock(pgt_lock); 129 spin_unlock(pgt_lock);
130 } 130 }
131 spin_unlock_irqrestore(&pgd_lock, flags); 131 spin_unlock(&pgd_lock);
132 } 132 }
133} 133}
134 134
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 95ea1551eebc..1337c51b07d7 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -780,11 +780,7 @@ void __cpuinit numa_add_cpu(int cpu)
780 int physnid; 780 int physnid;
781 int nid = NUMA_NO_NODE; 781 int nid = NUMA_NO_NODE;
782 782
783 apicid = early_per_cpu(x86_cpu_to_apicid, cpu); 783 nid = early_cpu_to_node(cpu);
784 if (apicid != BAD_APICID)
785 nid = apicid_to_node[apicid];
786 if (nid == NUMA_NO_NODE)
787 nid = early_cpu_to_node(cpu);
788 BUG_ON(nid == NUMA_NO_NODE || !node_online(nid)); 784 BUG_ON(nid == NUMA_NO_NODE || !node_online(nid));
789 785
790 /* 786 /*
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index d343b3c81f3c..90825f2eb0f4 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -57,12 +57,10 @@ static unsigned long direct_pages_count[PG_LEVEL_NUM];
57 57
58void update_page_count(int level, unsigned long pages) 58void update_page_count(int level, unsigned long pages)
59{ 59{
60 unsigned long flags;
61
62 /* Protect against CPA */ 60 /* Protect against CPA */
63 spin_lock_irqsave(&pgd_lock, flags); 61 spin_lock(&pgd_lock);
64 direct_pages_count[level] += pages; 62 direct_pages_count[level] += pages;
65 spin_unlock_irqrestore(&pgd_lock, flags); 63 spin_unlock(&pgd_lock);
66} 64}
67 65
68static void split_page_count(int level) 66static void split_page_count(int level)
@@ -394,7 +392,7 @@ static int
394try_preserve_large_page(pte_t *kpte, unsigned long address, 392try_preserve_large_page(pte_t *kpte, unsigned long address,
395 struct cpa_data *cpa) 393 struct cpa_data *cpa)
396{ 394{
397 unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn; 395 unsigned long nextpage_addr, numpages, pmask, psize, addr, pfn;
398 pte_t new_pte, old_pte, *tmp; 396 pte_t new_pte, old_pte, *tmp;
399 pgprot_t old_prot, new_prot, req_prot; 397 pgprot_t old_prot, new_prot, req_prot;
400 int i, do_split = 1; 398 int i, do_split = 1;
@@ -403,7 +401,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
403 if (cpa->force_split) 401 if (cpa->force_split)
404 return 1; 402 return 1;
405 403
406 spin_lock_irqsave(&pgd_lock, flags); 404 spin_lock(&pgd_lock);
407 /* 405 /*
408 * Check for races, another CPU might have split this page 406 * Check for races, another CPU might have split this page
409 * up already: 407 * up already:
@@ -498,14 +496,14 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
498 } 496 }
499 497
500out_unlock: 498out_unlock:
501 spin_unlock_irqrestore(&pgd_lock, flags); 499 spin_unlock(&pgd_lock);
502 500
503 return do_split; 501 return do_split;
504} 502}
505 503
506static int split_large_page(pte_t *kpte, unsigned long address) 504static int split_large_page(pte_t *kpte, unsigned long address)
507{ 505{
508 unsigned long flags, pfn, pfninc = 1; 506 unsigned long pfn, pfninc = 1;
509 unsigned int i, level; 507 unsigned int i, level;
510 pte_t *pbase, *tmp; 508 pte_t *pbase, *tmp;
511 pgprot_t ref_prot; 509 pgprot_t ref_prot;
@@ -519,7 +517,7 @@ static int split_large_page(pte_t *kpte, unsigned long address)
519 if (!base) 517 if (!base)
520 return -ENOMEM; 518 return -ENOMEM;
521 519
522 spin_lock_irqsave(&pgd_lock, flags); 520 spin_lock(&pgd_lock);
523 /* 521 /*
524 * Check for races, another CPU might have split this page 522 * Check for races, another CPU might have split this page
525 * up for us already: 523 * up for us already:
@@ -591,7 +589,7 @@ out_unlock:
591 */ 589 */
592 if (base) 590 if (base)
593 __free_page(base); 591 __free_page(base);
594 spin_unlock_irqrestore(&pgd_lock, flags); 592 spin_unlock(&pgd_lock);
595 593
596 return 0; 594 return 0;
597} 595}
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 500242d3c96d..0113d19c8aa6 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -121,14 +121,12 @@ static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
121 121
122static void pgd_dtor(pgd_t *pgd) 122static void pgd_dtor(pgd_t *pgd)
123{ 123{
124 unsigned long flags; /* can be called from interrupt context */
125
126 if (SHARED_KERNEL_PMD) 124 if (SHARED_KERNEL_PMD)
127 return; 125 return;
128 126
129 spin_lock_irqsave(&pgd_lock, flags); 127 spin_lock(&pgd_lock);
130 pgd_list_del(pgd); 128 pgd_list_del(pgd);
131 spin_unlock_irqrestore(&pgd_lock, flags); 129 spin_unlock(&pgd_lock);
132} 130}
133 131
134/* 132/*
@@ -260,7 +258,6 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
260{ 258{
261 pgd_t *pgd; 259 pgd_t *pgd;
262 pmd_t *pmds[PREALLOCATED_PMDS]; 260 pmd_t *pmds[PREALLOCATED_PMDS];
263 unsigned long flags;
264 261
265 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP); 262 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
266 263
@@ -280,12 +277,12 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
280 * respect to anything walking the pgd_list, so that they 277 * respect to anything walking the pgd_list, so that they
281 * never see a partially populated pgd. 278 * never see a partially populated pgd.
282 */ 279 */
283 spin_lock_irqsave(&pgd_lock, flags); 280 spin_lock(&pgd_lock);
284 281
285 pgd_ctor(mm, pgd); 282 pgd_ctor(mm, pgd);
286 pgd_prepopulate_pmd(mm, pgd, pmds); 283 pgd_prepopulate_pmd(mm, pgd, pmds);
287 284
288 spin_unlock_irqrestore(&pgd_lock, flags); 285 spin_unlock(&pgd_lock);
289 286
290 return pgd; 287 return pgd;
291 288
diff --git a/arch/x86/pci/ce4100.c b/arch/x86/pci/ce4100.c
index 85b68ef5e809..9260b3eb18d4 100644
--- a/arch/x86/pci/ce4100.c
+++ b/arch/x86/pci/ce4100.c
@@ -34,6 +34,7 @@
34#include <linux/pci.h> 34#include <linux/pci.h>
35#include <linux/init.h> 35#include <linux/init.h>
36 36
37#include <asm/ce4100.h>
37#include <asm/pci_x86.h> 38#include <asm/pci_x86.h>
38 39
39struct sim_reg { 40struct sim_reg {
@@ -306,10 +307,10 @@ struct pci_raw_ops ce4100_pci_conf = {
306 .write = ce4100_conf_write, 307 .write = ce4100_conf_write,
307}; 308};
308 309
309static int __init ce4100_pci_init(void) 310int __init ce4100_pci_init(void)
310{ 311{
311 init_sim_regs(); 312 init_sim_regs();
312 raw_pci_ops = &ce4100_pci_conf; 313 raw_pci_ops = &ce4100_pci_conf;
313	return 0;	 314	/* Indicate to the caller that it should invoke pci_legacy_init() */
315 return 1;
314} 316}
315subsys_initcall(ce4100_pci_init);
diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c
index d2c0d51a7178..cd6f184c3b3f 100644
--- a/arch/x86/platform/ce4100/ce4100.c
+++ b/arch/x86/platform/ce4100/ce4100.c
@@ -15,6 +15,7 @@
15#include <linux/serial_reg.h> 15#include <linux/serial_reg.h>
16#include <linux/serial_8250.h> 16#include <linux/serial_8250.h>
17 17
18#include <asm/ce4100.h>
18#include <asm/setup.h> 19#include <asm/setup.h>
19#include <asm/io.h> 20#include <asm/io.h>
20 21
@@ -129,4 +130,5 @@ void __init x86_ce4100_early_setup(void)
129 x86_init.resources.probe_roms = x86_init_noop; 130 x86_init.resources.probe_roms = x86_init_noop;
130 x86_init.mpparse.get_smp_config = x86_init_uint_noop; 131 x86_init.mpparse.get_smp_config = x86_init_uint_noop;
131 x86_init.mpparse.find_smp_config = sdv_find_smp_config; 132 x86_init.mpparse.find_smp_config = sdv_find_smp_config;
133 x86_init.pci.init = ce4100_pci_init;
132} 134}
diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
index dab874647530..044bda5b3174 100644
--- a/arch/x86/platform/olpc/olpc_dt.c
+++ b/arch/x86/platform/olpc/olpc_dt.c
@@ -140,8 +140,7 @@ void * __init prom_early_alloc(unsigned long size)
140 * wasted bootmem) and hand off chunks of it to callers. 140 * wasted bootmem) and hand off chunks of it to callers.
141 */ 141 */
142 res = alloc_bootmem(chunk_size); 142 res = alloc_bootmem(chunk_size);
143 if (!res) 143 BUG_ON(!res);
144 return NULL;
145 prom_early_allocated += chunk_size; 144 prom_early_allocated += chunk_size;
146 memset(res, 0, chunk_size); 145 memset(res, 0, chunk_size);
147 free_mem = chunk_size; 146 free_mem = chunk_size;
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index df58e9cad96a..a7b38d35c29a 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1364,11 +1364,11 @@ uv_activation_descriptor_init(int node, int pnode)
1364 memset(bd2, 0, sizeof(struct bau_desc)); 1364 memset(bd2, 0, sizeof(struct bau_desc));
1365 bd2->header.sw_ack_flag = 1; 1365 bd2->header.sw_ack_flag = 1;
1366 /* 1366 /*
1367 * base_dest_nodeid is the nasid (pnode<<1) of the first uvhub 1367 * base_dest_nodeid is the nasid of the first uvhub
1368 * in the partition. The bit map will indicate uvhub numbers, 1368 * in the partition. The bit map will indicate uvhub numbers,
1369 * which are 0-N in a partition. Pnodes are unique system-wide. 1369 * which are 0-N in a partition. Pnodes are unique system-wide.
1370 */ 1370 */
1371 bd2->header.base_dest_nodeid = uv_partition_base_pnode << 1; 1371 bd2->header.base_dest_nodeid = UV_PNODE_TO_NASID(uv_partition_base_pnode);
1372 bd2->header.dest_subnodeid = 0x10; /* the LB */ 1372 bd2->header.dest_subnodeid = 0x10; /* the LB */
1373 bd2->header.command = UV_NET_ENDPOINT_INTD; 1373 bd2->header.command = UV_NET_ENDPOINT_INTD;
1374 bd2->header.int_both = 1; 1374 bd2->header.int_both = 1;
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 5e92b61ad574..f6089421147a 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -986,10 +986,9 @@ static void xen_pgd_pin(struct mm_struct *mm)
986 */ 986 */
987void xen_mm_pin_all(void) 987void xen_mm_pin_all(void)
988{ 988{
989 unsigned long flags;
990 struct page *page; 989 struct page *page;
991 990
992 spin_lock_irqsave(&pgd_lock, flags); 991 spin_lock(&pgd_lock);
993 992
994 list_for_each_entry(page, &pgd_list, lru) { 993 list_for_each_entry(page, &pgd_list, lru) {
995 if (!PagePinned(page)) { 994 if (!PagePinned(page)) {
@@ -998,7 +997,7 @@ void xen_mm_pin_all(void)
998 } 997 }
999 } 998 }
1000 999
1001 spin_unlock_irqrestore(&pgd_lock, flags); 1000 spin_unlock(&pgd_lock);
1002} 1001}
1003 1002
1004/* 1003/*
@@ -1099,10 +1098,9 @@ static void xen_pgd_unpin(struct mm_struct *mm)
1099 */ 1098 */
1100void xen_mm_unpin_all(void) 1099void xen_mm_unpin_all(void)
1101{ 1100{
1102 unsigned long flags;
1103 struct page *page; 1101 struct page *page;
1104 1102
1105 spin_lock_irqsave(&pgd_lock, flags); 1103 spin_lock(&pgd_lock);
1106 1104
1107 list_for_each_entry(page, &pgd_list, lru) { 1105 list_for_each_entry(page, &pgd_list, lru) {
1108 if (PageSavePinned(page)) { 1106 if (PageSavePinned(page)) {
@@ -1112,7 +1110,7 @@ void xen_mm_unpin_all(void)
1112 } 1110 }
1113 } 1111 }
1114 1112
1115 spin_unlock_irqrestore(&pgd_lock, flags); 1113 spin_unlock(&pgd_lock);
1116} 1114}
1117 1115
1118void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next) 1116void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
diff --git a/block/blk-core.c b/block/blk-core.c
index 2f4002f79a24..518dd423a5fe 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -352,7 +352,7 @@ void blk_start_queue(struct request_queue *q)
352 WARN_ON(!irqs_disabled()); 352 WARN_ON(!irqs_disabled());
353 353
354 queue_flag_clear(QUEUE_FLAG_STOPPED, q); 354 queue_flag_clear(QUEUE_FLAG_STOPPED, q);
355 __blk_run_queue(q); 355 __blk_run_queue(q, false);
356} 356}
357EXPORT_SYMBOL(blk_start_queue); 357EXPORT_SYMBOL(blk_start_queue);
358 358
@@ -403,13 +403,14 @@ EXPORT_SYMBOL(blk_sync_queue);
403/** 403/**
404 * __blk_run_queue - run a single device queue 404 * __blk_run_queue - run a single device queue
405 * @q: The queue to run 405 * @q: The queue to run
406 * @force_kblockd: Don't run @q->request_fn directly. Use kblockd.
406 * 407 *
407 * Description: 408 * Description:
408 * See @blk_run_queue. This variant must be called with the queue lock 409 * See @blk_run_queue. This variant must be called with the queue lock
409 * held and interrupts disabled. 410 * held and interrupts disabled.
410 * 411 *
411 */ 412 */
412void __blk_run_queue(struct request_queue *q) 413void __blk_run_queue(struct request_queue *q, bool force_kblockd)
413{ 414{
414 blk_remove_plug(q); 415 blk_remove_plug(q);
415 416
@@ -423,7 +424,7 @@ void __blk_run_queue(struct request_queue *q)
423 * Only recurse once to avoid overrunning the stack, let the unplug 424 * Only recurse once to avoid overrunning the stack, let the unplug
424 * handling reinvoke the handler shortly if we already got there. 425 * handling reinvoke the handler shortly if we already got there.
425 */ 426 */
426 if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) { 427 if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
427 q->request_fn(q); 428 q->request_fn(q);
428 queue_flag_clear(QUEUE_FLAG_REENTER, q); 429 queue_flag_clear(QUEUE_FLAG_REENTER, q);
429 } else { 430 } else {
@@ -446,7 +447,7 @@ void blk_run_queue(struct request_queue *q)
446 unsigned long flags; 447 unsigned long flags;
447 448
448 spin_lock_irqsave(q->queue_lock, flags); 449 spin_lock_irqsave(q->queue_lock, flags);
449 __blk_run_queue(q); 450 __blk_run_queue(q, false);
450 spin_unlock_irqrestore(q->queue_lock, flags); 451 spin_unlock_irqrestore(q->queue_lock, flags);
451} 452}
452EXPORT_SYMBOL(blk_run_queue); 453EXPORT_SYMBOL(blk_run_queue);
@@ -1053,7 +1054,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
1053 1054
1054 drive_stat_acct(rq, 1); 1055 drive_stat_acct(rq, 1);
1055 __elv_add_request(q, rq, where, 0); 1056 __elv_add_request(q, rq, where, 0);
1056 __blk_run_queue(q); 1057 __blk_run_queue(q, false);
1057 spin_unlock_irqrestore(q->queue_lock, flags); 1058 spin_unlock_irqrestore(q->queue_lock, flags);
1058} 1059}
1059EXPORT_SYMBOL(blk_insert_request); 1060EXPORT_SYMBOL(blk_insert_request);
@@ -2610,13 +2611,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
2610} 2611}
2611EXPORT_SYMBOL(kblockd_schedule_work); 2612EXPORT_SYMBOL(kblockd_schedule_work);
2612 2613
2613int kblockd_schedule_delayed_work(struct request_queue *q,
2614 struct delayed_work *dwork, unsigned long delay)
2615{
2616 return queue_delayed_work(kblockd_workqueue, dwork, delay);
2617}
2618EXPORT_SYMBOL(kblockd_schedule_delayed_work);
2619
2620int __init blk_dev_init(void) 2614int __init blk_dev_init(void)
2621{ 2615{
2622 BUILD_BUG_ON(__REQ_NR_BITS > 8 * 2616 BUILD_BUG_ON(__REQ_NR_BITS > 8 *
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 54b123d6563e..b27d0208611b 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -66,10 +66,12 @@ static void blk_flush_complete_seq_end_io(struct request_queue *q,
66 66
67 /* 67 /*
68 * Moving a request silently to empty queue_head may stall the 68 * Moving a request silently to empty queue_head may stall the
69 * queue. Kick the queue in those cases. 69 * queue. Kick the queue in those cases. This function is called
			 70	 * from the request completion path and calling directly into
71 * request_fn may confuse the driver. Always use kblockd.
70 */ 72 */
71 if (was_empty && next_rq) 73 if (was_empty && next_rq)
72 __blk_run_queue(q); 74 __blk_run_queue(q, true);
73} 75}
74 76
75static void pre_flush_end_io(struct request *rq, int error) 77static void pre_flush_end_io(struct request *rq, int error)
@@ -130,7 +132,7 @@ static struct request *queue_next_fseq(struct request_queue *q)
130 BUG(); 132 BUG();
131 } 133 }
132 134
133 elv_insert(q, rq, ELEVATOR_INSERT_FRONT); 135 elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
134 return rq; 136 return rq;
135} 137}
136 138
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 1a320d2406b0..bd3e8df4d5e2 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -109,7 +109,6 @@ struct bio_batch
109 atomic_t done; 109 atomic_t done;
110 unsigned long flags; 110 unsigned long flags;
111 struct completion *wait; 111 struct completion *wait;
112 bio_end_io_t *end_io;
113}; 112};
114 113
115static void bio_batch_end_io(struct bio *bio, int err) 114static void bio_batch_end_io(struct bio *bio, int err)
@@ -122,17 +121,14 @@ static void bio_batch_end_io(struct bio *bio, int err)
122 else 121 else
123 clear_bit(BIO_UPTODATE, &bb->flags); 122 clear_bit(BIO_UPTODATE, &bb->flags);
124 } 123 }
125 if (bb) { 124 if (bb)
126 if (bb->end_io) 125 if (atomic_dec_and_test(&bb->done))
127 bb->end_io(bio, err); 126 complete(bb->wait);
128 atomic_inc(&bb->done);
129 complete(bb->wait);
130 }
131 bio_put(bio); 127 bio_put(bio);
132} 128}
133 129
134/** 130/**
135 * blkdev_issue_zeroout generate number of zero filled write bios	 131 * blkdev_issue_zeroout - generate number of zero filled write bios
136 * @bdev: blockdev to issue 132 * @bdev: blockdev to issue
137 * @sector: start sector 133 * @sector: start sector
138 * @nr_sects: number of sectors to write 134 * @nr_sects: number of sectors to write
@@ -150,13 +146,12 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
150 int ret; 146 int ret;
151 struct bio *bio; 147 struct bio *bio;
152 struct bio_batch bb; 148 struct bio_batch bb;
153 unsigned int sz, issued = 0; 149 unsigned int sz;
154 DECLARE_COMPLETION_ONSTACK(wait); 150 DECLARE_COMPLETION_ONSTACK(wait);
155 151
156 atomic_set(&bb.done, 0); 152 atomic_set(&bb.done, 1);
157 bb.flags = 1 << BIO_UPTODATE; 153 bb.flags = 1 << BIO_UPTODATE;
158 bb.wait = &wait; 154 bb.wait = &wait;
159 bb.end_io = NULL;
160 155
161submit: 156submit:
162 ret = 0; 157 ret = 0;
@@ -185,12 +180,12 @@ submit:
185 break; 180 break;
186 } 181 }
187 ret = 0; 182 ret = 0;
188 issued++; 183 atomic_inc(&bb.done);
189 submit_bio(WRITE, bio); 184 submit_bio(WRITE, bio);
190 } 185 }
191 186
192 /* Wait for bios in-flight */ 187 /* Wait for bios in-flight */
193 while (issued != atomic_read(&bb.done)) 188 if (!atomic_dec_and_test(&bb.done))
194 wait_for_completion(&wait); 189 wait_for_completion(&wait);
195 190
196 if (!test_bit(BIO_UPTODATE, &bb.flags)) 191 if (!test_bit(BIO_UPTODATE, &bb.flags))
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index a89043a3caa4..e36cc10a346c 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -20,6 +20,11 @@ static int throtl_quantum = 32;
20/* Throttling is performed over 100ms slice and after that slice is renewed */ 20/* Throttling is performed over 100ms slice and after that slice is renewed */
21static unsigned long throtl_slice = HZ/10; /* 100 ms */ 21static unsigned long throtl_slice = HZ/10; /* 100 ms */
22 22
23/* A workqueue to queue throttle related work */
24static struct workqueue_struct *kthrotld_workqueue;
25static void throtl_schedule_delayed_work(struct throtl_data *td,
26 unsigned long delay);
27
23struct throtl_rb_root { 28struct throtl_rb_root {
24 struct rb_root rb; 29 struct rb_root rb;
25 struct rb_node *left; 30 struct rb_node *left;
@@ -345,10 +350,9 @@ static void throtl_schedule_next_dispatch(struct throtl_data *td)
345 update_min_dispatch_time(st); 350 update_min_dispatch_time(st);
346 351
347 if (time_before_eq(st->min_disptime, jiffies)) 352 if (time_before_eq(st->min_disptime, jiffies))
348 throtl_schedule_delayed_work(td->queue, 0); 353 throtl_schedule_delayed_work(td, 0);
349 else 354 else
350 throtl_schedule_delayed_work(td->queue, 355 throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
351 (st->min_disptime - jiffies));
352} 356}
353 357
354static inline void 358static inline void
@@ -815,10 +819,10 @@ void blk_throtl_work(struct work_struct *work)
815} 819}
816 820
817/* Call with queue lock held */ 821/* Call with queue lock held */
818void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) 822static void
823throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
819{ 824{
820 825
821 struct throtl_data *td = q->td;
822 struct delayed_work *dwork = &td->throtl_work; 826 struct delayed_work *dwork = &td->throtl_work;
823 827
824 if (total_nr_queued(td) > 0) { 828 if (total_nr_queued(td) > 0) {
@@ -827,12 +831,11 @@ void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay)
827 * Cancel that and schedule a new one. 831 * Cancel that and schedule a new one.
828 */ 832 */
829 __cancel_delayed_work(dwork); 833 __cancel_delayed_work(dwork);
830 kblockd_schedule_delayed_work(q, dwork, delay); 834 queue_delayed_work(kthrotld_workqueue, dwork, delay);
831 throtl_log(td, "schedule work. delay=%lu jiffies=%lu", 835 throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
832 delay, jiffies); 836 delay, jiffies);
833 } 837 }
834} 838}
835EXPORT_SYMBOL(throtl_schedule_delayed_work);
836 839
837static void 840static void
838throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg) 841throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
@@ -920,7 +923,7 @@ static void throtl_update_blkio_group_read_bps(void *key,
920 smp_mb__after_atomic_inc(); 923 smp_mb__after_atomic_inc();
921 924
922 /* Schedule a work now to process the limit change */ 925 /* Schedule a work now to process the limit change */
923 throtl_schedule_delayed_work(td->queue, 0); 926 throtl_schedule_delayed_work(td, 0);
924} 927}
925 928
926static void throtl_update_blkio_group_write_bps(void *key, 929static void throtl_update_blkio_group_write_bps(void *key,
@@ -934,7 +937,7 @@ static void throtl_update_blkio_group_write_bps(void *key,
934 smp_mb__before_atomic_inc(); 937 smp_mb__before_atomic_inc();
935 atomic_inc(&td->limits_changed); 938 atomic_inc(&td->limits_changed);
936 smp_mb__after_atomic_inc(); 939 smp_mb__after_atomic_inc();
937 throtl_schedule_delayed_work(td->queue, 0); 940 throtl_schedule_delayed_work(td, 0);
938} 941}
939 942
940static void throtl_update_blkio_group_read_iops(void *key, 943static void throtl_update_blkio_group_read_iops(void *key,
@@ -948,7 +951,7 @@ static void throtl_update_blkio_group_read_iops(void *key,
948 smp_mb__before_atomic_inc(); 951 smp_mb__before_atomic_inc();
949 atomic_inc(&td->limits_changed); 952 atomic_inc(&td->limits_changed);
950 smp_mb__after_atomic_inc(); 953 smp_mb__after_atomic_inc();
951 throtl_schedule_delayed_work(td->queue, 0); 954 throtl_schedule_delayed_work(td, 0);
952} 955}
953 956
954static void throtl_update_blkio_group_write_iops(void *key, 957static void throtl_update_blkio_group_write_iops(void *key,
@@ -962,7 +965,7 @@ static void throtl_update_blkio_group_write_iops(void *key,
962 smp_mb__before_atomic_inc(); 965 smp_mb__before_atomic_inc();
963 atomic_inc(&td->limits_changed); 966 atomic_inc(&td->limits_changed);
964 smp_mb__after_atomic_inc(); 967 smp_mb__after_atomic_inc();
965 throtl_schedule_delayed_work(td->queue, 0); 968 throtl_schedule_delayed_work(td, 0);
966} 969}
967 970
968void throtl_shutdown_timer_wq(struct request_queue *q) 971void throtl_shutdown_timer_wq(struct request_queue *q)
@@ -1135,6 +1138,10 @@ void blk_throtl_exit(struct request_queue *q)
1135 1138
1136static int __init throtl_init(void) 1139static int __init throtl_init(void)
1137{ 1140{
1141 kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
1142 if (!kthrotld_workqueue)
1143 panic("Failed to create kthrotld\n");
1144
1138 blkio_policy_register(&blkio_policy_throtl); 1145 blkio_policy_register(&blkio_policy_throtl);
1139 return 0; 1146 return 0;
1140} 1147}
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 7be4c7959625..ea83a4f0c27d 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3355,7 +3355,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3355 cfqd->busy_queues > 1) { 3355 cfqd->busy_queues > 1) {
3356 cfq_del_timer(cfqd, cfqq); 3356 cfq_del_timer(cfqd, cfqq);
3357 cfq_clear_cfqq_wait_request(cfqq); 3357 cfq_clear_cfqq_wait_request(cfqq);
3358 __blk_run_queue(cfqd->queue); 3358 __blk_run_queue(cfqd->queue, false);
3359 } else { 3359 } else {
3360 cfq_blkiocg_update_idle_time_stats( 3360 cfq_blkiocg_update_idle_time_stats(
3361 &cfqq->cfqg->blkg); 3361 &cfqq->cfqg->blkg);
@@ -3370,7 +3370,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3370 * this new queue is RT and the current one is BE 3370 * this new queue is RT and the current one is BE
3371 */ 3371 */
3372 cfq_preempt_queue(cfqd, cfqq); 3372 cfq_preempt_queue(cfqd, cfqq);
3373 __blk_run_queue(cfqd->queue); 3373 __blk_run_queue(cfqd->queue, false);
3374 } 3374 }
3375} 3375}
3376 3376
@@ -3731,7 +3731,7 @@ static void cfq_kick_queue(struct work_struct *work)
3731 struct request_queue *q = cfqd->queue; 3731 struct request_queue *q = cfqd->queue;
3732 3732
3733 spin_lock_irq(q->queue_lock); 3733 spin_lock_irq(q->queue_lock);
3734 __blk_run_queue(cfqd->queue); 3734 __blk_run_queue(cfqd->queue, false);
3735 spin_unlock_irq(q->queue_lock); 3735 spin_unlock_irq(q->queue_lock);
3736} 3736}
3737 3737
diff --git a/block/elevator.c b/block/elevator.c
index 2569512830d3..236e93c1f46c 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -602,7 +602,7 @@ void elv_quiesce_start(struct request_queue *q)
602 */ 602 */
603 elv_drain_elevator(q); 603 elv_drain_elevator(q);
604 while (q->rq.elvpriv) { 604 while (q->rq.elvpriv) {
605 __blk_run_queue(q); 605 __blk_run_queue(q, false);
606 spin_unlock_irq(q->queue_lock); 606 spin_unlock_irq(q->queue_lock);
607 msleep(10); 607 msleep(10);
608 spin_lock_irq(q->queue_lock); 608 spin_lock_irq(q->queue_lock);
@@ -651,7 +651,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
651 * with anything. There's no point in delaying queue 651 * with anything. There's no point in delaying queue
652 * processing. 652 * processing.
653 */ 653 */
654 __blk_run_queue(q); 654 __blk_run_queue(q, false);
655 break; 655 break;
656 656
657 case ELEVATOR_INSERT_SORT: 657 case ELEVATOR_INSERT_SORT:
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 49e6a545eb63..dbf31ec9114d 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -78,7 +78,6 @@
78 78
79#include <asm/uaccess.h> 79#include <asm/uaccess.h>
80 80
81static DEFINE_MUTEX(loop_mutex);
82static LIST_HEAD(loop_devices); 81static LIST_HEAD(loop_devices);
83static DEFINE_MUTEX(loop_devices_mutex); 82static DEFINE_MUTEX(loop_devices_mutex);
84 83
@@ -1501,11 +1500,9 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
1501{ 1500{
1502 struct loop_device *lo = bdev->bd_disk->private_data; 1501 struct loop_device *lo = bdev->bd_disk->private_data;
1503 1502
1504 mutex_lock(&loop_mutex);
1505 mutex_lock(&lo->lo_ctl_mutex); 1503 mutex_lock(&lo->lo_ctl_mutex);
1506 lo->lo_refcnt++; 1504 lo->lo_refcnt++;
1507 mutex_unlock(&lo->lo_ctl_mutex); 1505 mutex_unlock(&lo->lo_ctl_mutex);
1508 mutex_unlock(&loop_mutex);
1509 1506
1510 return 0; 1507 return 0;
1511} 1508}
@@ -1515,7 +1512,6 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
1515 struct loop_device *lo = disk->private_data; 1512 struct loop_device *lo = disk->private_data;
1516 int err; 1513 int err;
1517 1514
1518 mutex_lock(&loop_mutex);
1519 mutex_lock(&lo->lo_ctl_mutex); 1515 mutex_lock(&lo->lo_ctl_mutex);
1520 1516
1521 if (--lo->lo_refcnt) 1517 if (--lo->lo_refcnt)
@@ -1540,7 +1536,6 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
1540out: 1536out:
1541 mutex_unlock(&lo->lo_ctl_mutex); 1537 mutex_unlock(&lo->lo_ctl_mutex);
1542out_unlocked: 1538out_unlocked:
1543 mutex_unlock(&loop_mutex);
1544 return 0; 1539 return 0;
1545} 1540}
1546 1541
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 7855f9f45b8e..62787e30d508 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -900,6 +900,14 @@ static void sender(void *send_info,
900 printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec); 900 printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
901#endif 901#endif
902 902
903 /*
 904	 * last_timeout_jiffies is updated here to avoid the
 905	 * smi_timeout() handler passing a very large time_diff
 906	 * value to smi_event_handler(), which causes
 907	 * the send command to abort.
908 */
909 smi_info->last_timeout_jiffies = jiffies;
910
903 mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES); 911 mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
904 912
905 if (smi_info->thread) 913 if (smi_info->thread)
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 490393186338..84b164d1eb2b 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -388,6 +388,10 @@ static void discard_port_data(struct port *port)
388 unsigned int len; 388 unsigned int len;
389 int ret; 389 int ret;
390 390
391 if (!port->portdev) {
392 /* Device has been unplugged. vqs are already gone. */
393 return;
394 }
391 vq = port->in_vq; 395 vq = port->in_vq;
392 if (port->inbuf) 396 if (port->inbuf)
393 buf = port->inbuf; 397 buf = port->inbuf;
@@ -470,6 +474,10 @@ static void reclaim_consumed_buffers(struct port *port)
470 void *buf; 474 void *buf;
471 unsigned int len; 475 unsigned int len;
472 476
477 if (!port->portdev) {
478 /* Device has been unplugged. vqs are already gone. */
479 return;
480 }
473 while ((buf = virtqueue_get_buf(port->out_vq, &len))) { 481 while ((buf = virtqueue_get_buf(port->out_vq, &len))) {
474 kfree(buf); 482 kfree(buf);
475 port->outvq_full = false; 483 port->outvq_full = false;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1109f6848a43..5cb4d09919d6 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1919,8 +1919,10 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1919 1919
1920 ret = sysdev_driver_register(&cpu_sysdev_class, 1920 ret = sysdev_driver_register(&cpu_sysdev_class,
1921 &cpufreq_sysdev_driver); 1921 &cpufreq_sysdev_driver);
1922 if (ret)
1923 goto err_null_driver;
1922 1924
1923 if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) { 1925 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
1924 int i; 1926 int i;
1925 ret = -ENODEV; 1927 ret = -ENODEV;
1926 1928
@@ -1935,21 +1937,22 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1935 if (ret) { 1937 if (ret) {
1936 dprintk("no CPU initialized for driver %s\n", 1938 dprintk("no CPU initialized for driver %s\n",
1937 driver_data->name); 1939 driver_data->name);
1938 sysdev_driver_unregister(&cpu_sysdev_class, 1940 goto err_sysdev_unreg;
1939 &cpufreq_sysdev_driver);
1940
1941 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1942 cpufreq_driver = NULL;
1943 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1944 } 1941 }
1945 } 1942 }
1946 1943
1947 if (!ret) { 1944 register_hotcpu_notifier(&cpufreq_cpu_notifier);
1948 register_hotcpu_notifier(&cpufreq_cpu_notifier); 1945 dprintk("driver %s up and running\n", driver_data->name);
1949 dprintk("driver %s up and running\n", driver_data->name); 1946 cpufreq_debug_enable_ratelimit();
1950 cpufreq_debug_enable_ratelimit();
1951 }
1952 1947
1948 return 0;
1949err_sysdev_unreg:
1950 sysdev_driver_unregister(&cpu_sysdev_class,
1951 &cpufreq_sysdev_driver);
1952err_null_driver:
1953 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1954 cpufreq_driver = NULL;
1955 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1953 return ret; 1956 return ret;
1954} 1957}
1955EXPORT_SYMBOL_GPL(cpufreq_register_driver); 1958EXPORT_SYMBOL_GPL(cpufreq_register_driver);
diff --git a/drivers/gpio/ml_ioh_gpio.c b/drivers/gpio/ml_ioh_gpio.c
index cead8e6ff345..7f6f01a4b145 100644
--- a/drivers/gpio/ml_ioh_gpio.c
+++ b/drivers/gpio/ml_ioh_gpio.c
@@ -326,6 +326,7 @@ static DEFINE_PCI_DEVICE_TABLE(ioh_gpio_pcidev_id) = {
326 { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x802E) }, 326 { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x802E) },
327 { 0, } 327 { 0, }
328}; 328};
329MODULE_DEVICE_TABLE(pci, ioh_gpio_pcidev_id);
329 330
330static struct pci_driver ioh_gpio_driver = { 331static struct pci_driver ioh_gpio_driver = {
331 .name = "ml_ioh_gpio", 332 .name = "ml_ioh_gpio",
diff --git a/drivers/gpio/pch_gpio.c b/drivers/gpio/pch_gpio.c
index 0eba0a75c804..2c6af8705103 100644
--- a/drivers/gpio/pch_gpio.c
+++ b/drivers/gpio/pch_gpio.c
@@ -286,6 +286,7 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gpio_pcidev_id) = {
286 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803) }, 286 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803) },
287 { 0, } 287 { 0, }
288}; 288};
289MODULE_DEVICE_TABLE(pci, pch_gpio_pcidev_id);
289 290
290static struct pci_driver pch_gpio_driver = { 291static struct pci_driver pch_gpio_driver = {
291 .name = "pch_gpio", 292 .name = "pch_gpio",
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 6977a1ce9d98..f73ef4390db6 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -672,7 +672,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
672 struct drm_crtc_helper_funcs *crtc_funcs; 672 struct drm_crtc_helper_funcs *crtc_funcs;
673 u16 *red, *green, *blue, *transp; 673 u16 *red, *green, *blue, *transp;
674 struct drm_crtc *crtc; 674 struct drm_crtc *crtc;
675 int i, rc = 0; 675 int i, j, rc = 0;
676 int start; 676 int start;
677 677
678 for (i = 0; i < fb_helper->crtc_count; i++) { 678 for (i = 0; i < fb_helper->crtc_count; i++) {
@@ -685,7 +685,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
685 transp = cmap->transp; 685 transp = cmap->transp;
686 start = cmap->start; 686 start = cmap->start;
687 687
688 for (i = 0; i < cmap->len; i++) { 688 for (j = 0; j < cmap->len; j++) {
689 u16 hred, hgreen, hblue, htransp = 0xffff; 689 u16 hred, hgreen, hblue, htransp = 0xffff;
690 690
691 hred = *red++; 691 hred = *red++;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 3601466c5502..4ff9b6cc973f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -865,7 +865,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
865 int max_freq; 865 int max_freq;
866 866
867 /* RPSTAT1 is in the GT power well */ 867 /* RPSTAT1 is in the GT power well */
868 __gen6_force_wake_get(dev_priv); 868 __gen6_gt_force_wake_get(dev_priv);
869 869
870 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); 870 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
871 seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1)); 871 seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1));
@@ -888,7 +888,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
888 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", 888 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
889 max_freq * 100); 889 max_freq * 100);
890 890
891 __gen6_force_wake_put(dev_priv); 891 __gen6_gt_force_wake_put(dev_priv);
892 } else { 892 } else {
893 seq_printf(m, "no P-state info available\n"); 893 seq_printf(m, "no P-state info available\n");
894 } 894 }
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 17bd766f2081..e33d9be7df3b 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1895,6 +1895,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1895 if (IS_GEN2(dev)) 1895 if (IS_GEN2(dev))
1896 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); 1896 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
1897 1897
1898 /* 965GM sometimes incorrectly writes to hardware status page (HWS)
1899 * using 32bit addressing, overwriting memory if HWS is located
1900 * above 4GB.
1901 *
1902 * The documentation also mentions an issue with undefined
1903 * behaviour if any general state is accessed within a page above 4GB,
1904 * which also needs to be handled carefully.
1905 */
1906 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
1907 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
1908
1898 mmio_bar = IS_GEN2(dev) ? 1 : 0; 1909 mmio_bar = IS_GEN2(dev) ? 1 : 0;
1899 dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0); 1910 dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
1900 if (!dev_priv->regs) { 1911 if (!dev_priv->regs) {
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 0ad533f06af9..22ec066adae6 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -46,6 +46,9 @@ module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
46unsigned int i915_powersave = 1; 46unsigned int i915_powersave = 1;
47module_param_named(powersave, i915_powersave, int, 0600); 47module_param_named(powersave, i915_powersave, int, 0600);
48 48
49unsigned int i915_semaphores = 0;
50module_param_named(semaphores, i915_semaphores, int, 0600);
51
49unsigned int i915_enable_rc6 = 0; 52unsigned int i915_enable_rc6 = 0;
50module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600); 53module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
51 54
@@ -254,7 +257,7 @@ void intel_detect_pch (struct drm_device *dev)
254 } 257 }
255} 258}
256 259
257void __gen6_force_wake_get(struct drm_i915_private *dev_priv) 260void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
258{ 261{
259 int count; 262 int count;
260 263
@@ -270,12 +273,22 @@ void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
270 udelay(10); 273 udelay(10);
271} 274}
272 275
273void __gen6_force_wake_put(struct drm_i915_private *dev_priv) 276void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
274{ 277{
275 I915_WRITE_NOTRACE(FORCEWAKE, 0); 278 I915_WRITE_NOTRACE(FORCEWAKE, 0);
276 POSTING_READ(FORCEWAKE); 279 POSTING_READ(FORCEWAKE);
277} 280}
278 281
282void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
283{
284 int loop = 500;
285 u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
286 while (fifo < 20 && loop--) {
287 udelay(10);
288 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
289 }
290}
291
279static int i915_drm_freeze(struct drm_device *dev) 292static int i915_drm_freeze(struct drm_device *dev)
280{ 293{
281 struct drm_i915_private *dev_priv = dev->dev_private; 294 struct drm_i915_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 65dfe81d0035..456f40484838 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -956,6 +956,7 @@ extern struct drm_ioctl_desc i915_ioctls[];
956extern int i915_max_ioctl; 956extern int i915_max_ioctl;
957extern unsigned int i915_fbpercrtc; 957extern unsigned int i915_fbpercrtc;
958extern unsigned int i915_powersave; 958extern unsigned int i915_powersave;
959extern unsigned int i915_semaphores;
959extern unsigned int i915_lvds_downclock; 960extern unsigned int i915_lvds_downclock;
960extern unsigned int i915_panel_use_ssc; 961extern unsigned int i915_panel_use_ssc;
961extern unsigned int i915_enable_rc6; 962extern unsigned int i915_enable_rc6;
@@ -1177,6 +1178,9 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
1177void i915_gem_free_all_phys_object(struct drm_device *dev); 1178void i915_gem_free_all_phys_object(struct drm_device *dev);
1178void i915_gem_release(struct drm_device *dev, struct drm_file *file); 1179void i915_gem_release(struct drm_device *dev, struct drm_file *file);
1179 1180
1181uint32_t
1182i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj);
1183
1180/* i915_gem_gtt.c */ 1184/* i915_gem_gtt.c */
1181void i915_gem_restore_gtt_mappings(struct drm_device *dev); 1185void i915_gem_restore_gtt_mappings(struct drm_device *dev);
1182int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj); 1186int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
@@ -1353,22 +1357,32 @@ __i915_write(64, q)
1353 * must be set to prevent GT core from power down and stale values being 1357 * must be set to prevent GT core from power down and stale values being
1354 * returned. 1358 * returned.
1355 */ 1359 */
1356void __gen6_force_wake_get(struct drm_i915_private *dev_priv); 1360void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
1357void __gen6_force_wake_put (struct drm_i915_private *dev_priv); 1361void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
1358static inline u32 i915_safe_read(struct drm_i915_private *dev_priv, u32 reg) 1362void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
1363
1364static inline u32 i915_gt_read(struct drm_i915_private *dev_priv, u32 reg)
1359{ 1365{
1360 u32 val; 1366 u32 val;
1361 1367
1362 if (dev_priv->info->gen >= 6) { 1368 if (dev_priv->info->gen >= 6) {
1363 __gen6_force_wake_get(dev_priv); 1369 __gen6_gt_force_wake_get(dev_priv);
1364 val = I915_READ(reg); 1370 val = I915_READ(reg);
1365 __gen6_force_wake_put(dev_priv); 1371 __gen6_gt_force_wake_put(dev_priv);
1366 } else 1372 } else
1367 val = I915_READ(reg); 1373 val = I915_READ(reg);
1368 1374
1369 return val; 1375 return val;
1370} 1376}
1371 1377
1378static inline void i915_gt_write(struct drm_i915_private *dev_priv,
1379 u32 reg, u32 val)
1380{
1381 if (dev_priv->info->gen >= 6)
1382 __gen6_gt_wait_for_fifo(dev_priv);
1383 I915_WRITE(reg, val);
1384}
1385
1372static inline void 1386static inline void
1373i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len) 1387i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len)
1374{ 1388{
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index cf4f74c7c6fb..36e66cc5225e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1398,7 +1398,7 @@ i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
1398 * Return the required GTT alignment for an object, only taking into account 1398 * Return the required GTT alignment for an object, only taking into account
1399 * unfenced tiled surface requirements. 1399 * unfenced tiled surface requirements.
1400 */ 1400 */
1401static uint32_t 1401uint32_t
1402i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj) 1402i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
1403{ 1403{
1404 struct drm_device *dev = obj->base.dev; 1404 struct drm_device *dev = obj->base.dev;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d2f445e825f2..50ab1614571c 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -772,8 +772,8 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
772 if (from == NULL || to == from) 772 if (from == NULL || to == from)
773 return 0; 773 return 0;
774 774
775 /* XXX gpu semaphores are currently causing hard hangs on SNB mobile */ 775 /* XXX gpu semaphores are implicated in various hard hangs on SNB */
776 if (INTEL_INFO(obj->base.dev)->gen < 6 || IS_MOBILE(obj->base.dev)) 776 if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
777 return i915_gem_object_wait_rendering(obj, true); 777 return i915_gem_object_wait_rendering(obj, true);
778 778
779 idx = intel_ring_sync_index(from, to); 779 idx = intel_ring_sync_index(from, to);
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 79a04fde69b5..d64843e18df2 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -184,7 +184,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
184static bool 184static bool
185i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) 185i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
186{ 186{
187 int tile_width, tile_height; 187 int tile_width;
188 188
189 /* Linear is always fine */ 189 /* Linear is always fine */
190 if (tiling_mode == I915_TILING_NONE) 190 if (tiling_mode == I915_TILING_NONE)
@@ -215,20 +215,6 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
215 } 215 }
216 } 216 }
217 217
218 if (IS_GEN2(dev) ||
219 (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
220 tile_height = 32;
221 else
222 tile_height = 8;
223 /* i8xx is strange: It has 2 interleaved rows of tiles, so needs an even
224 * number of tile rows. */
225 if (IS_GEN2(dev))
226 tile_height *= 2;
227
228 /* Size needs to be aligned to a full tile row */
229 if (size & (tile_height * stride - 1))
230 return false;
231
232 /* 965+ just needs multiples of tile width */ 218 /* 965+ just needs multiples of tile width */
233 if (INTEL_INFO(dev)->gen >= 4) { 219 if (INTEL_INFO(dev)->gen >= 4) {
234 if (stride & (tile_width - 1)) 220 if (stride & (tile_width - 1))
@@ -363,14 +349,27 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
363 (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end && 349 (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
364 i915_gem_object_fence_ok(obj, args->tiling_mode)); 350 i915_gem_object_fence_ok(obj, args->tiling_mode));
365 351
366 obj->tiling_changed = true; 352 /* Rebind if we need a change of alignment */
367 obj->tiling_mode = args->tiling_mode; 353 if (!obj->map_and_fenceable) {
368 obj->stride = args->stride; 354 u32 unfenced_alignment =
355 i915_gem_get_unfenced_gtt_alignment(obj);
356 if (obj->gtt_offset & (unfenced_alignment - 1))
357 ret = i915_gem_object_unbind(obj);
358 }
359
360 if (ret == 0) {
361 obj->tiling_changed = true;
362 obj->tiling_mode = args->tiling_mode;
363 obj->stride = args->stride;
364 }
369 } 365 }
366 /* we have to maintain this existing ABI... */
367 args->stride = obj->stride;
368 args->tiling_mode = obj->tiling_mode;
370 drm_gem_object_unreference(&obj->base); 369 drm_gem_object_unreference(&obj->base);
371 mutex_unlock(&dev->struct_mutex); 370 mutex_unlock(&dev->struct_mutex);
372 371
373 return 0; 372 return ret;
374} 373}
375 374
376/** 375/**
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 729d4233b763..2abe240dae58 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1553,7 +1553,17 @@
1553 1553
1554/* Backlight control */ 1554/* Backlight control */
1555#define BLC_PWM_CTL 0x61254 1555#define BLC_PWM_CTL 0x61254
1556#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
1556#define BLC_PWM_CTL2 0x61250 /* 965+ only */ 1557#define BLC_PWM_CTL2 0x61250 /* 965+ only */
1558#define BLM_COMBINATION_MODE (1 << 30)
1559/*
1560 * This is the most significant 15 bits of the number of backlight cycles in a
1561 * complete cycle of the modulated backlight control.
1562 *
1563 * The actual value is this field multiplied by two.
1564 */
1565#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
1566#define BLM_LEGACY_MODE (1 << 16)
1557/* 1567/*
1558 * This is the number of cycles out of the backlight modulation cycle for which 1568 * This is the number of cycles out of the backlight modulation cycle for which
1559 * the backlight is on. 1569 * the backlight is on.
@@ -3261,6 +3271,8 @@
3261#define FORCEWAKE 0xA18C 3271#define FORCEWAKE 0xA18C
3262#define FORCEWAKE_ACK 0x130090 3272#define FORCEWAKE_ACK 0x130090
3263 3273
3274#define GT_FIFO_FREE_ENTRIES 0x120008
3275
3264#define GEN6_RPNSWREQ 0xA008 3276#define GEN6_RPNSWREQ 0xA008
3265#define GEN6_TURBO_DISABLE (1<<31) 3277#define GEN6_TURBO_DISABLE (1<<31)
3266#define GEN6_FREQUENCY(x) ((x)<<25) 3278#define GEN6_FREQUENCY(x) ((x)<<25)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e79b25bbee6c..49fb54fd9a18 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1219,7 +1219,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
1219 u32 blt_ecoskpd; 1219 u32 blt_ecoskpd;
1220 1220
1221 /* Make sure blitter notifies FBC of writes */ 1221 /* Make sure blitter notifies FBC of writes */
1222 __gen6_force_wake_get(dev_priv); 1222 __gen6_gt_force_wake_get(dev_priv);
1223 blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD); 1223 blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
1224 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY << 1224 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
1225 GEN6_BLITTER_LOCK_SHIFT; 1225 GEN6_BLITTER_LOCK_SHIFT;
@@ -1230,7 +1230,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
1230 GEN6_BLITTER_LOCK_SHIFT); 1230 GEN6_BLITTER_LOCK_SHIFT);
1231 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); 1231 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1232 POSTING_READ(GEN6_BLITTER_ECOSKPD); 1232 POSTING_READ(GEN6_BLITTER_ECOSKPD);
1233 __gen6_force_wake_put(dev_priv); 1233 __gen6_gt_force_wake_put(dev_priv);
1234} 1234}
1235 1235
1236static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) 1236static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
@@ -6282,7 +6282,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
6282 * userspace... 6282 * userspace...
6283 */ 6283 */
6284 I915_WRITE(GEN6_RC_STATE, 0); 6284 I915_WRITE(GEN6_RC_STATE, 0);
6285 __gen6_force_wake_get(dev_priv); 6285 __gen6_gt_force_wake_get(dev_priv);
6286 6286
6287 /* disable the counters and set deterministic thresholds */ 6287 /* disable the counters and set deterministic thresholds */
6288 I915_WRITE(GEN6_RC_CONTROL, 0); 6288 I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -6380,7 +6380,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
6380 /* enable all PM interrupts */ 6380 /* enable all PM interrupts */
6381 I915_WRITE(GEN6_PMINTRMSK, 0); 6381 I915_WRITE(GEN6_PMINTRMSK, 0);
6382 6382
6383 __gen6_force_wake_put(dev_priv); 6383 __gen6_gt_force_wake_put(dev_priv);
6384} 6384}
6385 6385
6386void intel_enable_clock_gating(struct drm_device *dev) 6386void intel_enable_clock_gating(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index d860abeda70f..f8f86e57df22 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -30,6 +30,8 @@
30 30
31#include "intel_drv.h" 31#include "intel_drv.h"
32 32
33#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
34
33void 35void
34intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, 36intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
35 struct drm_display_mode *adjusted_mode) 37 struct drm_display_mode *adjusted_mode)
@@ -110,6 +112,19 @@ done:
110 dev_priv->pch_pf_size = (width << 16) | height; 112 dev_priv->pch_pf_size = (width << 16) | height;
111} 113}
112 114
115static int is_backlight_combination_mode(struct drm_device *dev)
116{
117 struct drm_i915_private *dev_priv = dev->dev_private;
118
119 if (INTEL_INFO(dev)->gen >= 4)
120 return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
121
122 if (IS_GEN2(dev))
123 return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE;
124
125 return 0;
126}
127
113static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv) 128static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
114{ 129{
115 u32 val; 130 u32 val;
@@ -166,6 +181,9 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
166 if (INTEL_INFO(dev)->gen < 4) 181 if (INTEL_INFO(dev)->gen < 4)
167 max &= ~1; 182 max &= ~1;
168 } 183 }
184
185 if (is_backlight_combination_mode(dev))
186 max *= 0xff;
169 } 187 }
170 188
171 DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max); 189 DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
@@ -183,6 +201,14 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
183 val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; 201 val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
184 if (IS_PINEVIEW(dev)) 202 if (IS_PINEVIEW(dev))
185 val >>= 1; 203 val >>= 1;
204
205 if (is_backlight_combination_mode(dev)){
206 u8 lbpc;
207
208 val &= ~1;
209 pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
210 val *= lbpc;
211 }
186 } 212 }
187 213
188 DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val); 214 DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
@@ -205,6 +231,16 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level)
205 231
206 if (HAS_PCH_SPLIT(dev)) 232 if (HAS_PCH_SPLIT(dev))
207 return intel_pch_panel_set_backlight(dev, level); 233 return intel_pch_panel_set_backlight(dev, level);
234
235 if (is_backlight_combination_mode(dev)){
236 u32 max = intel_panel_get_max_backlight(dev);
237 u8 lbpc;
238
239 lbpc = level * 0xfe / max + 1;
240 level /= lbpc;
241 pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc);
242 }
243
208 tmp = I915_READ(BLC_PWM_CTL); 244 tmp = I915_READ(BLC_PWM_CTL);
209 if (IS_PINEVIEW(dev)) { 245 if (IS_PINEVIEW(dev)) {
210 tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1); 246 tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
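In combination mode the effective brightness is the product of the PWM duty cycle and the LBPC byte in PCI config space, which is why the maximum is scaled by 0xff and a requested level is split into an LBPC factor plus a reduced PWM value. A small standalone calculation showing the split and the round trip (plain userspace C, PCI config access replaced by variables):

#include <stdio.h>

int main(void)
{
	unsigned int pwm_max = 0x1fff;		/* example PWM period */
	unsigned int max = pwm_max * 0xff;	/* combined max, as in get_max_backlight */
	unsigned int level = max / 3;		/* some requested brightness */

	/* Split as in intel_panel_set_backlight(): lbpc is a u8 in 1..255. */
	unsigned int lbpc = level * 0xfe / max + 1;
	unsigned int pwm = level / lbpc;

	/* Read back as in intel_panel_get_backlight(): product of the two. */
	unsigned int readback = pwm * lbpc;

	printf("level=%u  lbpc=%u  pwm=%u  readback=%u (error %u)\n",
	       level, lbpc, pwm, readback, level - readback);
	return 0;
}

The rounding error of the split stays below one LBPC step, so the reconstructed value is within lbpc-1 counts of the requested level.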
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 6d6fde85a636..34306865a5df 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -14,22 +14,23 @@ struct intel_hw_status_page {
14 struct drm_i915_gem_object *obj; 14 struct drm_i915_gem_object *obj;
15}; 15};
16 16
17#define I915_RING_READ(reg) i915_safe_read(dev_priv, reg) 17#define I915_RING_READ(reg) i915_gt_read(dev_priv, reg)
18#define I915_RING_WRITE(reg, val) i915_gt_write(dev_priv, reg, val)
18 19
19#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base)) 20#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base))
20#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val) 21#define I915_WRITE_TAIL(ring, val) I915_RING_WRITE(RING_TAIL((ring)->mmio_base), val)
21 22
22#define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base)) 23#define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base))
23#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val) 24#define I915_WRITE_START(ring, val) I915_RING_WRITE(RING_START((ring)->mmio_base), val)
24 25
25#define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base)) 26#define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base))
26#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val) 27#define I915_WRITE_HEAD(ring, val) I915_RING_WRITE(RING_HEAD((ring)->mmio_base), val)
27 28
28#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base)) 29#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base))
29#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val) 30#define I915_WRITE_CTL(ring, val) I915_RING_WRITE(RING_CTL((ring)->mmio_base), val)
30 31
31#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
32#define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base)) 32#define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base))
33#define I915_WRITE_IMR(ring, val) I915_RING_WRITE(RING_IMR((ring)->mmio_base), val)
33 34
34#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base)) 35#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base))
35#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base)) 36#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base))
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 65699bfaaaea..b368ed74aad7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -83,7 +83,8 @@ nouveau_dma_init(struct nouveau_channel *chan)
83 return ret; 83 return ret;
84 84
85 /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */ 85 /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
86 ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy); 86 ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfd0, 0x1000,
87 &chan->m2mf_ntfy);
87 if (ret) 88 if (ret)
88 return ret; 89 return ret;
89 90
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 9821fcacc3d2..982d70b12722 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -852,7 +852,8 @@ extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
852extern int nouveau_notifier_init_channel(struct nouveau_channel *); 852extern int nouveau_notifier_init_channel(struct nouveau_channel *);
853extern void nouveau_notifier_takedown_channel(struct nouveau_channel *); 853extern void nouveau_notifier_takedown_channel(struct nouveau_channel *);
854extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle, 854extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle,
855 int cout, uint32_t *offset); 855 int cout, uint32_t start, uint32_t end,
856 uint32_t *offset);
856extern int nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *); 857extern int nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *);
857extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data, 858extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data,
858 struct drm_file *); 859 struct drm_file *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 26347b7cd872..b0fb9bdcddb7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -725,8 +725,10 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
725 ret = vram->get(dev, mem->num_pages << PAGE_SHIFT, 725 ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
726 mem->page_alignment << PAGE_SHIFT, size_nc, 726 mem->page_alignment << PAGE_SHIFT, size_nc,
727 (nvbo->tile_flags >> 8) & 0xff, &node); 727 (nvbo->tile_flags >> 8) & 0xff, &node);
728 if (ret) 728 if (ret) {
729 return ret; 729 mem->mm_node = NULL;
730 return (ret == -ENOSPC) ? 0 : ret;
731 }
730 732
731 node->page_shift = 12; 733 node->page_shift = 12;
732 if (nvbo->vma.node) 734 if (nvbo->vma.node)
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c
index 8844b50c3e54..7609756b6faf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.c
@@ -123,7 +123,7 @@ nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
123 return 0; 123 return 0;
124 } 124 }
125 125
126 return -ENOMEM; 126 return -ENOSPC;
127} 127}
128 128
129int 129int
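Returning -ENOSPC from nouveau_mm_get() lets the caller in nouveau_vram_manager_new() tell "this placement is full" apart from a hard failure: TTM treats a NULL mm_node with a 0 return as "try eviction and retry", while any other error aborts the allocation. A compact standalone sketch of that translation (error codes only, no real allocator):

#include <stdio.h>
#include <stddef.h>
#include <errno.h>

/* Pretend low-level allocator: fails with -ENOSPC when the heap is full. */
static int fake_mm_get(int heap_full, void **node)
{
	if (heap_full) {
		*node = NULL;
		return -ENOSPC;
	}
	*node = (void *)0x1;	/* dummy node */
	return 0;
}

/* Mirror of the manager_new() error handling after this change. */
static int manager_new(int heap_full, void **mm_node)
{
	void *node;
	int ret = fake_mm_get(heap_full, &node);

	if (ret) {
		*mm_node = NULL;
		return (ret == -ENOSPC) ? 0 : ret;	/* 0 + NULL node => TTM evicts */
	}
	*mm_node = node;
	return 0;
}

int main(void)
{
	void *node;
	printf("free heap : ret=%d node=%p\n", manager_new(0, &node), node);
	printf("full heap : ret=%d node=%p\n", manager_new(1, &node), node);
	return 0;
}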
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index fe29d604b820..5ea167623a82 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -96,7 +96,8 @@ nouveau_notifier_gpuobj_dtor(struct drm_device *dev,
96 96
97int 97int
98nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, 98nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
99 int size, uint32_t *b_offset) 99 int size, uint32_t start, uint32_t end,
100 uint32_t *b_offset)
100{ 101{
101 struct drm_device *dev = chan->dev; 102 struct drm_device *dev = chan->dev;
102 struct nouveau_gpuobj *nobj = NULL; 103 struct nouveau_gpuobj *nobj = NULL;
@@ -104,9 +105,10 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
104 uint32_t offset; 105 uint32_t offset;
105 int target, ret; 106 int target, ret;
106 107
107 mem = drm_mm_search_free(&chan->notifier_heap, size, 0, 0); 108 mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0,
109 start, end, 0);
108 if (mem) 110 if (mem)
109 mem = drm_mm_get_block(mem, size, 0); 111 mem = drm_mm_get_block_range(mem, size, 0, start, end);
110 if (!mem) { 112 if (!mem) {
111 NV_ERROR(dev, "Channel %d notifier block full\n", chan->id); 113 NV_ERROR(dev, "Channel %d notifier block full\n", chan->id);
112 return -ENOMEM; 114 return -ENOMEM;
@@ -177,7 +179,8 @@ nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
177 if (IS_ERR(chan)) 179 if (IS_ERR(chan))
178 return PTR_ERR(chan); 180 return PTR_ERR(chan);
179 181
180 ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset); 182 ret = nouveau_notifier_alloc(chan, na->handle, na->size, 0, 0x1000,
183 &na->offset);
181 nouveau_channel_put(&chan); 184 nouveau_channel_put(&chan);
182 return ret; 185 return ret;
183} 186}
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index ea0041810ae3..e57caa2a00e3 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -403,16 +403,24 @@ nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj)
403void 403void
404nv50_instmem_flush(struct drm_device *dev) 404nv50_instmem_flush(struct drm_device *dev)
405{ 405{
406 struct drm_nouveau_private *dev_priv = dev->dev_private;
407
408 spin_lock(&dev_priv->ramin_lock);
406 nv_wr32(dev, 0x00330c, 0x00000001); 409 nv_wr32(dev, 0x00330c, 0x00000001);
407 if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000)) 410 if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000))
408 NV_ERROR(dev, "PRAMIN flush timeout\n"); 411 NV_ERROR(dev, "PRAMIN flush timeout\n");
412 spin_unlock(&dev_priv->ramin_lock);
409} 413}
410 414
411void 415void
412nv84_instmem_flush(struct drm_device *dev) 416nv84_instmem_flush(struct drm_device *dev)
413{ 417{
418 struct drm_nouveau_private *dev_priv = dev->dev_private;
419
420 spin_lock(&dev_priv->ramin_lock);
414 nv_wr32(dev, 0x070000, 0x00000001); 421 nv_wr32(dev, 0x070000, 0x00000001);
415 if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000)) 422 if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
416 NV_ERROR(dev, "PRAMIN flush timeout\n"); 423 NV_ERROR(dev, "PRAMIN flush timeout\n");
424 spin_unlock(&dev_priv->ramin_lock);
417} 425}
418 426
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index 459ff08241e5..6144156f255a 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -169,7 +169,11 @@ nv50_vm_flush(struct nouveau_vm *vm)
169void 169void
170nv50_vm_flush_engine(struct drm_device *dev, int engine) 170nv50_vm_flush_engine(struct drm_device *dev, int engine)
171{ 171{
172 struct drm_nouveau_private *dev_priv = dev->dev_private;
173
174 spin_lock(&dev_priv->ramin_lock);
172 nv_wr32(dev, 0x100c80, (engine << 16) | 1); 175 nv_wr32(dev, 0x100c80, (engine << 16) | 1);
173 if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000)) 176 if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
174 NV_ERROR(dev, "vm flush timeout: engine %d\n", engine); 177 NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
178 spin_unlock(&dev_priv->ramin_lock);
175} 179}
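Both flushes above are a write-trigger-then-poll sequence on a shared register, so concurrent callers could consume each other's completion bit; taking ramin_lock around the whole sequence makes it atomic. A standalone sketch of the pattern, with a pthread mutex standing in for the spinlock and a variable standing in for the register:

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t ramin_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int flush_reg;		/* mock of the 0x00330c-style register */

static void mock_wr32(unsigned int v) { flush_reg = v; }
static int mock_wait_clear(unsigned int mask)
{
	/* The mock "hardware" completes immediately; real code polls with a timeout. */
	flush_reg &= ~mask;
	return (flush_reg & mask) == 0;
}

static void instmem_flush(void)
{
	pthread_mutex_lock(&ramin_lock);
	mock_wr32(0x00000001);			/* kick the flush */
	if (!mock_wait_clear(0x00000002))	/* wait for the completion bit */
		fprintf(stderr, "flush timeout\n");
	pthread_mutex_unlock(&ramin_lock);
}

int main(void)
{
	instmem_flush();
	printf("flush done, reg=%#x\n", flush_reg);
	return 0;
}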
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index d270b3ff896b..6140ea1de45a 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2194,7 +2194,6 @@ int evergreen_mc_init(struct radeon_device *rdev)
2194 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; 2194 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
2195 } 2195 }
2196 rdev->mc.visible_vram_size = rdev->mc.aper_size; 2196 rdev->mc.visible_vram_size = rdev->mc.aper_size;
2197 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
2198 r700_vram_gtt_location(rdev, &rdev->mc); 2197 r700_vram_gtt_location(rdev, &rdev->mc);
2199 radeon_update_bandwidth_info(rdev); 2198 radeon_update_bandwidth_info(rdev);
2200 2199
@@ -2934,7 +2933,7 @@ static int evergreen_startup(struct radeon_device *rdev)
2934 /* XXX: ontario has problems blitting to gart at the moment */ 2933 /* XXX: ontario has problems blitting to gart at the moment */
2935 if (rdev->family == CHIP_PALM) { 2934 if (rdev->family == CHIP_PALM) {
2936 rdev->asic->copy = NULL; 2935 rdev->asic->copy = NULL;
2937 rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 2936 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
2938 } 2937 }
2939 2938
2940 /* allocate wb buffer */ 2939 /* allocate wb buffer */
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index 2adfb03f479b..2be698e78ff2 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -623,7 +623,7 @@ done:
623 dev_err(rdev->dev, "(%d) pin blit object failed\n", r); 623 dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
624 return r; 624 return r;
625 } 625 }
626 rdev->mc.active_vram_size = rdev->mc.real_vram_size; 626 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
627 return 0; 627 return 0;
628} 628}
629 629
@@ -631,7 +631,7 @@ void evergreen_blit_fini(struct radeon_device *rdev)
631{ 631{
632 int r; 632 int r;
633 633
634 rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 634 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
635 if (rdev->r600_blit.shader_obj == NULL) 635 if (rdev->r600_blit.shader_obj == NULL)
636 return; 636 return;
637 /* If we can't reserve the bo, unref should be enough to destroy 637 /* If we can't reserve the bo, unref should be enough to destroy
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 93fa735c8c1a..e372f9e1e5ce 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -70,23 +70,6 @@ MODULE_FIRMWARE(FIRMWARE_R520);
70 70
71void r100_pre_page_flip(struct radeon_device *rdev, int crtc) 71void r100_pre_page_flip(struct radeon_device *rdev, int crtc)
72{ 72{
73 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
74 u32 tmp;
75
76 /* make sure flip is at vb rather than hb */
77 tmp = RREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset);
78 tmp &= ~RADEON_CRTC_OFFSET_FLIP_CNTL;
79 /* make sure pending bit is asserted */
80 tmp |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN;
81 WREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset, tmp);
82
83 /* set pageflip to happen as late as possible in the vblank interval.
84 * same field for crtc1/2
85 */
86 tmp = RREG32(RADEON_CRTC_GEN_CNTL);
87 tmp &= ~RADEON_CRTC_VSTAT_MODE_MASK;
88 WREG32(RADEON_CRTC_GEN_CNTL, tmp);
89
90 /* enable the pflip int */ 73 /* enable the pflip int */
91 radeon_irq_kms_pflip_irq_get(rdev, crtc); 74 radeon_irq_kms_pflip_irq_get(rdev, crtc);
92} 75}
@@ -1041,7 +1024,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
1041 return r; 1024 return r;
1042 } 1025 }
1043 rdev->cp.ready = true; 1026 rdev->cp.ready = true;
1044 rdev->mc.active_vram_size = rdev->mc.real_vram_size; 1027 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
1045 return 0; 1028 return 0;
1046} 1029}
1047 1030
@@ -1059,7 +1042,7 @@ void r100_cp_fini(struct radeon_device *rdev)
1059void r100_cp_disable(struct radeon_device *rdev) 1042void r100_cp_disable(struct radeon_device *rdev)
1060{ 1043{
1061 /* Disable ring */ 1044 /* Disable ring */
1062 rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 1045 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
1063 rdev->cp.ready = false; 1046 rdev->cp.ready = false;
1064 WREG32(RADEON_CP_CSQ_MODE, 0); 1047 WREG32(RADEON_CP_CSQ_MODE, 0);
1065 WREG32(RADEON_CP_CSQ_CNTL, 0); 1048 WREG32(RADEON_CP_CSQ_CNTL, 0);
@@ -2329,7 +2312,6 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
2329 /* FIXME we don't use the second aperture yet when we could use it */ 2312 /* FIXME we don't use the second aperture yet when we could use it */
2330 if (rdev->mc.visible_vram_size > rdev->mc.aper_size) 2313 if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
2331 rdev->mc.visible_vram_size = rdev->mc.aper_size; 2314 rdev->mc.visible_vram_size = rdev->mc.aper_size;
2332 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
2333 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); 2315 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
2334 if (rdev->flags & RADEON_IS_IGP) { 2316 if (rdev->flags & RADEON_IS_IGP) {
2335 uint32_t tom; 2317 uint32_t tom;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index de88624d5f87..9b3fad23b76c 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1255,7 +1255,6 @@ int r600_mc_init(struct radeon_device *rdev)
1255 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); 1255 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
1256 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); 1256 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
1257 rdev->mc.visible_vram_size = rdev->mc.aper_size; 1257 rdev->mc.visible_vram_size = rdev->mc.aper_size;
1258 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
1259 r600_vram_gtt_location(rdev, &rdev->mc); 1258 r600_vram_gtt_location(rdev, &rdev->mc);
1260 1259
1261 if (rdev->flags & RADEON_IS_IGP) { 1260 if (rdev->flags & RADEON_IS_IGP) {
@@ -1937,7 +1936,7 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
1937 */ 1936 */
1938void r600_cp_stop(struct radeon_device *rdev) 1937void r600_cp_stop(struct radeon_device *rdev)
1939{ 1938{
1940 rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 1939 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
1941 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); 1940 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1942 WREG32(SCRATCH_UMSK, 0); 1941 WREG32(SCRATCH_UMSK, 0);
1943} 1942}
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 41f7aafc97c4..df68d91e8190 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -558,7 +558,7 @@ done:
558 dev_err(rdev->dev, "(%d) pin blit object failed\n", r); 558 dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
559 return r; 559 return r;
560 } 560 }
561 rdev->mc.active_vram_size = rdev->mc.real_vram_size; 561 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
562 return 0; 562 return 0;
563} 563}
564 564
@@ -566,7 +566,7 @@ void r600_blit_fini(struct radeon_device *rdev)
566{ 566{
567 int r; 567 int r;
568 568
569 rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 569 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
570 if (rdev->r600_blit.shader_obj == NULL) 570 if (rdev->r600_blit.shader_obj == NULL)
571 return; 571 return;
572 /* If we can't reserve the bo, unref should be enough to destroy 572 /* If we can't reserve the bo, unref should be enough to destroy
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 56c48b67ef3d..6b3429495118 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -345,7 +345,6 @@ struct radeon_mc {
345 * about vram size near mc fb location */ 345 * about vram size near mc fb location */
346 u64 mc_vram_size; 346 u64 mc_vram_size;
347 u64 visible_vram_size; 347 u64 visible_vram_size;
348 u64 active_vram_size;
349 u64 gtt_size; 348 u64 gtt_size;
350 u64 gtt_start; 349 u64 gtt_start;
351 u64 gtt_end; 350 u64 gtt_end;
@@ -1448,6 +1447,7 @@ extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *m
1448extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); 1447extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
1449extern int radeon_resume_kms(struct drm_device *dev); 1448extern int radeon_resume_kms(struct drm_device *dev);
1450extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state); 1449extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
1450extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
1451 1451
1452/* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */ 1452/* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */
1453extern bool r600_card_posted(struct radeon_device *rdev); 1453extern bool r600_card_posted(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index e75d63b8e21d..793c5e6026ad 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -834,6 +834,9 @@ static struct radeon_asic sumo_asic = {
834 .pm_finish = &evergreen_pm_finish, 834 .pm_finish = &evergreen_pm_finish,
835 .pm_init_profile = &rs780_pm_init_profile, 835 .pm_init_profile = &rs780_pm_init_profile,
836 .pm_get_dynpm_state = &r600_pm_get_dynpm_state, 836 .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
837 .pre_page_flip = &evergreen_pre_page_flip,
838 .page_flip = &evergreen_page_flip,
839 .post_page_flip = &evergreen_post_page_flip,
837}; 840};
838 841
839static struct radeon_asic btc_asic = { 842static struct radeon_asic btc_asic = {
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index df95eb83dac6..1fe95dfe48c9 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -156,9 +156,12 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
156{ 156{
157 struct radeon_device *rdev = dev->dev_private; 157 struct radeon_device *rdev = dev->dev_private;
158 struct drm_radeon_gem_info *args = data; 158 struct drm_radeon_gem_info *args = data;
159 struct ttm_mem_type_manager *man;
160
161 man = &rdev->mman.bdev.man[TTM_PL_VRAM];
159 162
160 args->vram_size = rdev->mc.real_vram_size; 163 args->vram_size = rdev->mc.real_vram_size;
161 args->vram_visible = rdev->mc.real_vram_size; 164 args->vram_visible = (u64)man->size << PAGE_SHIFT;
162 if (rdev->stollen_vga_memory) 165 if (rdev->stollen_vga_memory)
163 args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory); 166 args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
164 args->vram_visible -= radeon_fbdev_total_size(rdev); 167 args->vram_visible -= radeon_fbdev_total_size(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index cf0638c3b7c7..78968b738e88 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -443,7 +443,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
443 (target_fb->bits_per_pixel * 8)); 443 (target_fb->bits_per_pixel * 8));
444 crtc_pitch |= crtc_pitch << 16; 444 crtc_pitch |= crtc_pitch << 16;
445 445
446 446 crtc_offset_cntl |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN;
447 if (tiling_flags & RADEON_TILING_MACRO) { 447 if (tiling_flags & RADEON_TILING_MACRO) {
448 if (ASIC_IS_R300(rdev)) 448 if (ASIC_IS_R300(rdev))
449 crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN | 449 crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN |
@@ -502,6 +502,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
502 gen_cntl_val = RREG32(gen_cntl_reg); 502 gen_cntl_val = RREG32(gen_cntl_reg);
503 gen_cntl_val &= ~(0xf << 8); 503 gen_cntl_val &= ~(0xf << 8);
504 gen_cntl_val |= (format << 8); 504 gen_cntl_val |= (format << 8);
505 gen_cntl_val &= ~RADEON_CRTC_VSTAT_MODE_MASK;
505 WREG32(gen_cntl_reg, gen_cntl_val); 506 WREG32(gen_cntl_reg, gen_cntl_val);
506 507
507 crtc_offset = (u32)base; 508 crtc_offset = (u32)base;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index e5b2cf10cbf4..8389b4c63d12 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -589,6 +589,20 @@ void radeon_ttm_fini(struct radeon_device *rdev)
589 DRM_INFO("radeon: ttm finalized\n"); 589 DRM_INFO("radeon: ttm finalized\n");
590} 590}
591 591
592/* this should only be called at bootup or when userspace
593 * isn't running */
594void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
595{
596 struct ttm_mem_type_manager *man;
597
598 if (!rdev->mman.initialized)
599 return;
600
601 man = &rdev->mman.bdev.man[TTM_PL_VRAM];
602 /* this just adjusts TTM size idea, which sets lpfn to the correct value */
603 man->size = size >> PAGE_SHIFT;
604}
605
592static struct vm_operations_struct radeon_ttm_vm_ops; 606static struct vm_operations_struct radeon_ttm_vm_ops;
593static const struct vm_operations_struct *ttm_vm_ops = NULL; 607static const struct vm_operations_struct *ttm_vm_ops = NULL;
594 608
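radeon_ttm_set_active_vram_size() replaces the old mc.active_vram_size field: instead of the driver tracking a byte count, the TTM VRAM manager's size (kept in pages) is resized, which in turn bounds where buffers may be placed while the CP/blitter is down. The byte/page conversion it relies on, as a standalone check with example sizes:

#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages, as on x86 */

int main(void)
{
	unsigned long long visible_vram = 256ULL << 20;	/* 256 MiB aperture */
	unsigned long long real_vram    = 1024ULL << 20;	/* 1 GiB on board */

	/* While the CP is stopped only CPU-visible VRAM is usable... */
	unsigned long long man_size = visible_vram >> PAGE_SHIFT;
	printf("CP stopped : man->size = %llu pages (%llu MiB)\n",
	       man_size, (man_size << PAGE_SHIFT) >> 20);

	/* ...and once the CP/blit path is up the full VRAM is opened. */
	man_size = real_vram >> PAGE_SHIFT;
	printf("CP running : man->size = %llu pages (%llu MiB)\n",
	       man_size, (man_size << PAGE_SHIFT) >> 20);
	return 0;
}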
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 5afe294ed51f..8af4679db23e 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -751,7 +751,6 @@ void rs600_mc_init(struct radeon_device *rdev)
751 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); 751 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
752 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 752 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
753 rdev->mc.visible_vram_size = rdev->mc.aper_size; 753 rdev->mc.visible_vram_size = rdev->mc.aper_size;
754 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
755 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); 754 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
756 base = RREG32_MC(R_000004_MC_FB_LOCATION); 755 base = RREG32_MC(R_000004_MC_FB_LOCATION);
757 base = G_000004_MC_FB_START(base) << 16; 756 base = G_000004_MC_FB_START(base) << 16;
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 6638c8e4c81b..66c949b7c18c 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -157,7 +157,6 @@ void rs690_mc_init(struct radeon_device *rdev)
157 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); 157 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
158 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); 158 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
159 rdev->mc.visible_vram_size = rdev->mc.aper_size; 159 rdev->mc.visible_vram_size = rdev->mc.aper_size;
160 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
161 base = RREG32_MC(R_000100_MCCFG_FB_LOCATION); 160 base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
162 base = G_000100_MC_FB_START(base) << 16; 161 base = G_000100_MC_FB_START(base) << 16;
163 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); 162 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index d8ba67690656..714ad45757d0 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -307,7 +307,7 @@ static void rv770_mc_program(struct radeon_device *rdev)
307 */ 307 */
308void r700_cp_stop(struct radeon_device *rdev) 308void r700_cp_stop(struct radeon_device *rdev)
309{ 309{
310 rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 310 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
311 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); 311 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
312 WREG32(SCRATCH_UMSK, 0); 312 WREG32(SCRATCH_UMSK, 0);
313} 313}
@@ -1123,7 +1123,6 @@ int rv770_mc_init(struct radeon_device *rdev)
1123 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); 1123 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
1124 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); 1124 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
1125 rdev->mc.visible_vram_size = rdev->mc.aper_size; 1125 rdev->mc.visible_vram_size = rdev->mc.aper_size;
1126 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
1127 r700_vram_gtt_location(rdev, &rdev->mc); 1126 r700_vram_gtt_location(rdev, &rdev->mc);
1128 radeon_update_bandwidth_info(rdev); 1127 radeon_update_bandwidth_info(rdev);
1129 1128
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 3f49dd376f02..6e06019015a5 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -37,7 +37,7 @@
37#define SIO_F71858FG_LD_HWM 0x02 /* Hardware monitor logical device */ 37#define SIO_F71858FG_LD_HWM 0x02 /* Hardware monitor logical device */
38#define SIO_F71882FG_LD_HWM 0x04 /* Hardware monitor logical device */ 38#define SIO_F71882FG_LD_HWM 0x04 /* Hardware monitor logical device */
39#define SIO_UNLOCK_KEY 0x87 /* Key to enable Super-I/O */ 39#define SIO_UNLOCK_KEY 0x87 /* Key to enable Super-I/O */
40#define SIO_LOCK_KEY 0xAA /* Key to diasble Super-I/O */ 40#define SIO_LOCK_KEY 0xAA /* Key to disable Super-I/O */
41 41
42#define SIO_REG_LDSEL 0x07 /* Logical device select */ 42#define SIO_REG_LDSEL 0x07 /* Logical device select */
43#define SIO_REG_DEVID 0x20 /* Device ID (2 bytes) */ 43#define SIO_REG_DEVID 0x20 /* Device ID (2 bytes) */
@@ -2111,7 +2111,6 @@ static int f71882fg_remove(struct platform_device *pdev)
2111 int nr_fans = (data->type == f71882fg) ? 4 : 3; 2111 int nr_fans = (data->type == f71882fg) ? 4 : 3;
2112 u8 start_reg = f71882fg_read8(data, F71882FG_REG_START); 2112 u8 start_reg = f71882fg_read8(data, F71882FG_REG_START);
2113 2113
2114 platform_set_drvdata(pdev, NULL);
2115 if (data->hwmon_dev) 2114 if (data->hwmon_dev)
2116 hwmon_device_unregister(data->hwmon_dev); 2115 hwmon_device_unregister(data->hwmon_dev);
2117 2116
@@ -2178,6 +2177,7 @@ static int f71882fg_remove(struct platform_device *pdev)
2178 } 2177 }
2179 } 2178 }
2180 2179
2180 platform_set_drvdata(pdev, NULL);
2181 kfree(data); 2181 kfree(data);
2182 2182
2183 return 0; 2183 return 0;
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 2e067dd2ee51..50ea1f43bdc1 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -29,6 +29,7 @@
29#include <linux/pci.h> 29#include <linux/pci.h>
30#include <linux/mutex.h> 30#include <linux/mutex.h>
31#include <linux/ktime.h> 31#include <linux/ktime.h>
32#include <linux/slab.h>
32 33
33#define PCH_EVENT_SET 0 /* I2C Interrupt Event Set Status */ 34#define PCH_EVENT_SET 0 /* I2C Interrupt Event Set Status */
34#define PCH_EVENT_NONE 1 /* I2C Interrupt Event Clear Status */ 35#define PCH_EVENT_NONE 1 /* I2C Interrupt Event Clear Status */
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index ef3bcb1ce864..61653f079671 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -249,7 +249,7 @@ static struct i2c_adapter ocores_adapter = {
249static int ocores_i2c_of_probe(struct platform_device* pdev, 249static int ocores_i2c_of_probe(struct platform_device* pdev,
250 struct ocores_i2c* i2c) 250 struct ocores_i2c* i2c)
251{ 251{
252 __be32* val; 252 const __be32* val;
253 253
254 val = of_get_property(pdev->dev.of_node, "regstep", NULL); 254 val = of_get_property(pdev->dev.of_node, "regstep", NULL);
255 if (!val) { 255 if (!val) {
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 829a2a1029f7..58a58c7eaa17 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -378,9 +378,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
378 * REVISIT: Some wkup sources might not be needed. 378 * REVISIT: Some wkup sources might not be needed.
379 */ 379 */
380 dev->westate = OMAP_I2C_WE_ALL; 380 dev->westate = OMAP_I2C_WE_ALL;
381 if (dev->rev < OMAP_I2C_REV_ON_4430) 381 omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, dev->westate);
382 omap_i2c_write_reg(dev, OMAP_I2C_WE_REG,
383 dev->westate);
384 } 382 }
385 } 383 }
386 omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0); 384 omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 1fa091e05690..4a5c4a44ffb1 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -62,6 +62,7 @@
62#include <linux/notifier.h> 62#include <linux/notifier.h>
63#include <linux/cpu.h> 63#include <linux/cpu.h>
64#include <asm/mwait.h> 64#include <asm/mwait.h>
65#include <asm/msr.h>
65 66
66#define INTEL_IDLE_VERSION "0.4" 67#define INTEL_IDLE_VERSION "0.4"
67#define PREFIX "intel_idle: " 68#define PREFIX "intel_idle: "
@@ -85,6 +86,12 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
85static struct cpuidle_state *cpuidle_state_table; 86static struct cpuidle_state *cpuidle_state_table;
86 87
87/* 88/*
89 * Hardware C-state auto-demotion may not always be optimal.
90 * Indicate which enable bits to clear here.
91 */
92static unsigned long long auto_demotion_disable_flags;
93
94/*
88 * Set this flag for states where the HW flushes the TLB for us 95 * Set this flag for states where the HW flushes the TLB for us
89 * and so we don't need cross-calls to keep it consistent. 96 * and so we don't need cross-calls to keep it consistent.
90 * If this flag is set, SW flushes the TLB, so even if the 97 * If this flag is set, SW flushes the TLB, so even if the
@@ -281,6 +288,15 @@ static struct notifier_block setup_broadcast_notifier = {
281 .notifier_call = setup_broadcast_cpuhp_notify, 288 .notifier_call = setup_broadcast_cpuhp_notify,
282}; 289};
283 290
291static void auto_demotion_disable(void *dummy)
292{
293 unsigned long long msr_bits;
294
295 rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
296 msr_bits &= ~auto_demotion_disable_flags;
297 wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
298}
299
284/* 300/*
285 * intel_idle_probe() 301 * intel_idle_probe()
286 */ 302 */
@@ -324,11 +340,17 @@ static int intel_idle_probe(void)
324 case 0x25: /* Westmere */ 340 case 0x25: /* Westmere */
325 case 0x2C: /* Westmere */ 341 case 0x2C: /* Westmere */
326 cpuidle_state_table = nehalem_cstates; 342 cpuidle_state_table = nehalem_cstates;
343 auto_demotion_disable_flags =
344 (NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE);
327 break; 345 break;
328 346
329 case 0x1C: /* 28 - Atom Processor */ 347 case 0x1C: /* 28 - Atom Processor */
348 cpuidle_state_table = atom_cstates;
349 break;
350
330 case 0x26: /* 38 - Lincroft Atom Processor */ 351 case 0x26: /* 38 - Lincroft Atom Processor */
331 cpuidle_state_table = atom_cstates; 352 cpuidle_state_table = atom_cstates;
353 auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE;
332 break; 354 break;
333 355
334 case 0x2A: /* SNB */ 356 case 0x2A: /* SNB */
@@ -436,6 +458,8 @@ static int intel_idle_cpuidle_devices_init(void)
436 return -EIO; 458 return -EIO;
437 } 459 }
438 } 460 }
461 if (auto_demotion_disable_flags)
462 smp_call_function(auto_demotion_disable, NULL, 1);
439 463
440 return 0; 464 return 0;
441} 465}
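auto_demotion_disable() is run on every CPU via smp_call_function() and simply clears the per-model demotion-enable bits in MSR_NHM_SNB_PKG_CST_CFG_CTL, leaving the rest of the register untouched. The read-modify-write it performs, as a standalone calculation with the MSR replaced by a variable (the bit positions below are illustrative placeholders, not the documented layout):

#include <stdio.h>

/* Illustrative flag bits -- placeholders, not the real MSR layout. */
#define DEMOTE_C1	(1ULL << 26)
#define DEMOTE_C3	(1ULL << 25)

static unsigned long long fake_msr = 0x1f07ULL | DEMOTE_C1 | DEMOTE_C3;

static void auto_demotion_disable(unsigned long long disable_flags)
{
	unsigned long long msr_bits = fake_msr;	/* rdmsrl() stand-in */
	msr_bits &= ~disable_flags;		/* clear only the demotion enables */
	fake_msr = msr_bits;			/* wrmsrl() stand-in */
}

int main(void)
{
	printf("before: %#llx\n", fake_msr);
	auto_demotion_disable(DEMOTE_C1 | DEMOTE_C3);
	printf("after : %#llx\n", fake_msr);
	return 0;
}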
diff --git a/drivers/isdn/hardware/eicon/istream.c b/drivers/isdn/hardware/eicon/istream.c
index 18f8798442fa..7bd5baa547be 100644
--- a/drivers/isdn/hardware/eicon/istream.c
+++ b/drivers/isdn/hardware/eicon/istream.c
@@ -62,7 +62,7 @@ void diva_xdi_provide_istream_info (ADAPTER* a,
62 stream interface. 62 stream interface.
63 If synchronous service was requested, then function 63 If synchronous service was requested, then function
64 does return amount of data written to stream. 64 does return amount of data written to stream.
65 'final' does indicate that pice of data to be written is 65 'final' does indicate that piece of data to be written is
66 final part of frame (necessary only by structured datatransfer) 66 final part of frame (necessary only by structured datatransfer)
 67 return 0 if zero length packet was written 67 return 0 if zero length packet was written
68 return -1 if stream is full 68 return -1 if stream is full
diff --git a/drivers/media/common/tuners/tda8290.c b/drivers/media/common/tuners/tda8290.c
index bc6a67768af1..8c4852114eeb 100644
--- a/drivers/media/common/tuners/tda8290.c
+++ b/drivers/media/common/tuners/tda8290.c
@@ -658,13 +658,13 @@ static int tda8290_probe(struct tuner_i2c_props *i2c_props)
658#define TDA8290_ID 0x89 658#define TDA8290_ID 0x89
659 u8 reg = 0x1f, id; 659 u8 reg = 0x1f, id;
660 struct i2c_msg msg_read[] = { 660 struct i2c_msg msg_read[] = {
661 { .addr = 0x4b, .flags = 0, .len = 1, .buf = &reg }, 661 { .addr = i2c_props->addr, .flags = 0, .len = 1, .buf = &reg },
662 { .addr = 0x4b, .flags = I2C_M_RD, .len = 1, .buf = &id }, 662 { .addr = i2c_props->addr, .flags = I2C_M_RD, .len = 1, .buf = &id },
663 }; 663 };
664 664
665 /* detect tda8290 */ 665 /* detect tda8290 */
666 if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) { 666 if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) {
667 printk(KERN_WARNING "%s: tda8290 couldn't read register 0x%02x\n", 667 printk(KERN_WARNING "%s: couldn't read register 0x%02x\n",
668 __func__, reg); 668 __func__, reg);
669 return -ENODEV; 669 return -ENODEV;
670 } 670 }
@@ -685,13 +685,13 @@ static int tda8295_probe(struct tuner_i2c_props *i2c_props)
685#define TDA8295C2_ID 0x8b 685#define TDA8295C2_ID 0x8b
686 u8 reg = 0x2f, id; 686 u8 reg = 0x2f, id;
687 struct i2c_msg msg_read[] = { 687 struct i2c_msg msg_read[] = {
688 { .addr = 0x4b, .flags = 0, .len = 1, .buf = &reg }, 688 { .addr = i2c_props->addr, .flags = 0, .len = 1, .buf = &reg },
689 { .addr = 0x4b, .flags = I2C_M_RD, .len = 1, .buf = &id }, 689 { .addr = i2c_props->addr, .flags = I2C_M_RD, .len = 1, .buf = &id },
690 }; 690 };
691 691
692 /* detect tda8290 */ 692 /* detect tda8295 */
693 if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) { 693 if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) {
694 printk(KERN_WARNING "%s: tda8290 couldn't read register 0x%02x\n", 694 printk(KERN_WARNING "%s: couldn't read register 0x%02x\n",
695 __func__, reg); 695 __func__, reg);
696 return -ENODEV; 696 return -ENODEV;
697 } 697 }
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
index defd83964ce2..193cdb77b76a 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -870,6 +870,23 @@ static int dib7070p_tuner_attach(struct dvb_usb_adapter *adap)
870 return 0; 870 return 0;
871} 871}
872 872
873static int stk7700p_pid_filter(struct dvb_usb_adapter *adapter, int index,
874 u16 pid, int onoff)
875{
876 struct dib0700_state *st = adapter->dev->priv;
877 if (st->is_dib7000pc)
878 return dib7000p_pid_filter(adapter->fe, index, pid, onoff);
879 return dib7000m_pid_filter(adapter->fe, index, pid, onoff);
880}
881
882static int stk7700p_pid_filter_ctrl(struct dvb_usb_adapter *adapter, int onoff)
883{
884 struct dib0700_state *st = adapter->dev->priv;
885 if (st->is_dib7000pc)
886 return dib7000p_pid_filter_ctrl(adapter->fe, onoff);
887 return dib7000m_pid_filter_ctrl(adapter->fe, onoff);
888}
889
873static int stk70x0p_pid_filter(struct dvb_usb_adapter *adapter, int index, u16 pid, int onoff) 890static int stk70x0p_pid_filter(struct dvb_usb_adapter *adapter, int index, u16 pid, int onoff)
874{ 891{
875 return dib7000p_pid_filter(adapter->fe, index, pid, onoff); 892 return dib7000p_pid_filter(adapter->fe, index, pid, onoff);
@@ -1875,8 +1892,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
1875 { 1892 {
1876 .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, 1893 .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
1877 .pid_filter_count = 32, 1894 .pid_filter_count = 32,
1878 .pid_filter = stk70x0p_pid_filter, 1895 .pid_filter = stk7700p_pid_filter,
1879 .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, 1896 .pid_filter_ctrl = stk7700p_pid_filter_ctrl,
1880 .frontend_attach = stk7700p_frontend_attach, 1897 .frontend_attach = stk7700p_frontend_attach,
1881 .tuner_attach = stk7700p_tuner_attach, 1898 .tuner_attach = stk7700p_tuner_attach,
1882 1899
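The new stk7700p_pid_filter*() wrappers exist because STK7700P boards ship with either a DiB7000M or a DiB7000PC demodulator, and the per-device state records which one was found at attach time; the wrapper routes each request to the matching frontend helper. The dispatch itself, reduced to a standalone sketch with stub frontends:

#include <stdio.h>

/* Stub helpers standing in for dib7000p/dib7000m_pid_filter(). */
static int dib7000p_filter(int idx, int pid, int on)
{ printf("dib7000p: idx=%d pid=%d on=%d\n", idx, pid, on); return 0; }
static int dib7000m_filter(int idx, int pid, int on)
{ printf("dib7000m: idx=%d pid=%d on=%d\n", idx, pid, on); return 0; }

struct state { int is_dib7000pc; };

static int stk7700p_pid_filter(struct state *st, int idx, int pid, int on)
{
	if (st->is_dib7000pc)
		return dib7000p_filter(idx, pid, on);
	return dib7000m_filter(idx, pid, on);
}

int main(void)
{
	struct state pc = { .is_dib7000pc = 1 }, m = { .is_dib7000pc = 0 };
	stk7700p_pid_filter(&pc, 0, 0x100, 1);
	stk7700p_pid_filter(&m, 0, 0x100, 1);
	return 0;
}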
diff --git a/drivers/media/dvb/dvb-usb/lmedm04.c b/drivers/media/dvb/dvb-usb/lmedm04.c
index 9eea4188303b..46ccd01a7696 100644
--- a/drivers/media/dvb/dvb-usb/lmedm04.c
+++ b/drivers/media/dvb/dvb-usb/lmedm04.c
@@ -659,7 +659,7 @@ static int lme2510_download_firmware(struct usb_device *dev,
659} 659}
660 660
661/* Default firmware for LME2510C */ 661/* Default firmware for LME2510C */
662const char lme_firmware[50] = "dvb-usb-lme2510c-s7395.fw"; 662char lme_firmware[50] = "dvb-usb-lme2510c-s7395.fw";
663 663
664static void lme_coldreset(struct usb_device *dev) 664static void lme_coldreset(struct usb_device *dev)
665{ 665{
@@ -1006,7 +1006,7 @@ static struct dvb_usb_device_properties lme2510c_properties = {
1006 .caps = DVB_USB_IS_AN_I2C_ADAPTER, 1006 .caps = DVB_USB_IS_AN_I2C_ADAPTER,
1007 .usb_ctrl = DEVICE_SPECIFIC, 1007 .usb_ctrl = DEVICE_SPECIFIC,
1008 .download_firmware = lme2510_download_firmware, 1008 .download_firmware = lme2510_download_firmware,
1009 .firmware = lme_firmware, 1009 .firmware = (const char *)&lme_firmware,
1010 .size_of_priv = sizeof(struct lme2510_state), 1010 .size_of_priv = sizeof(struct lme2510_state),
1011 .num_adapters = 1, 1011 .num_adapters = 1,
1012 .adapter = { 1012 .adapter = {
@@ -1109,5 +1109,5 @@ module_exit(lme2510_module_exit);
1109 1109
1110MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>"); 1110MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>");
1111MODULE_DESCRIPTION("LME2510(C) DVB-S USB2.0"); 1111MODULE_DESCRIPTION("LME2510(C) DVB-S USB2.0");
1112MODULE_VERSION("1.74"); 1112MODULE_VERSION("1.75");
1113MODULE_LICENSE("GPL"); 1113MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/dib7000m.c b/drivers/media/dvb/frontends/dib7000m.c
index c7f5ccf54aa5..289a79837f24 100644
--- a/drivers/media/dvb/frontends/dib7000m.c
+++ b/drivers/media/dvb/frontends/dib7000m.c
@@ -1285,6 +1285,25 @@ struct i2c_adapter * dib7000m_get_i2c_master(struct dvb_frontend *demod, enum di
1285} 1285}
1286EXPORT_SYMBOL(dib7000m_get_i2c_master); 1286EXPORT_SYMBOL(dib7000m_get_i2c_master);
1287 1287
1288int dib7000m_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
1289{
1290 struct dib7000m_state *state = fe->demodulator_priv;
1291 u16 val = dib7000m_read_word(state, 294 + state->reg_offs) & 0xffef;
1292 val |= (onoff & 0x1) << 4;
1293 dprintk("PID filter enabled %d", onoff);
1294 return dib7000m_write_word(state, 294 + state->reg_offs, val);
1295}
1296EXPORT_SYMBOL(dib7000m_pid_filter_ctrl);
1297
1298int dib7000m_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
1299{
1300 struct dib7000m_state *state = fe->demodulator_priv;
1301 dprintk("PID filter: index %x, PID %d, OnOff %d", id, pid, onoff);
1302 return dib7000m_write_word(state, 300 + state->reg_offs + id,
1303 onoff ? (1 << 13) | pid : 0);
1304}
1305EXPORT_SYMBOL(dib7000m_pid_filter);
1306
1288#if 0 1307#if 0
1289/* used with some prototype boards */ 1308/* used with some prototype boards */
1290int dib7000m_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, 1309int dib7000m_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods,
diff --git a/drivers/media/dvb/frontends/dib7000m.h b/drivers/media/dvb/frontends/dib7000m.h
index 113819ce9f0d..81fcf2241c64 100644
--- a/drivers/media/dvb/frontends/dib7000m.h
+++ b/drivers/media/dvb/frontends/dib7000m.h
@@ -46,6 +46,8 @@ extern struct dvb_frontend *dib7000m_attach(struct i2c_adapter *i2c_adap,
46extern struct i2c_adapter *dib7000m_get_i2c_master(struct dvb_frontend *, 46extern struct i2c_adapter *dib7000m_get_i2c_master(struct dvb_frontend *,
47 enum dibx000_i2c_interface, 47 enum dibx000_i2c_interface,
48 int); 48 int);
49extern int dib7000m_pid_filter(struct dvb_frontend *, u8 id, u16 pid, u8 onoff);
50extern int dib7000m_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff);
49#else 51#else
50static inline 52static inline
51struct dvb_frontend *dib7000m_attach(struct i2c_adapter *i2c_adap, 53struct dvb_frontend *dib7000m_attach(struct i2c_adapter *i2c_adap,
@@ -63,6 +65,19 @@ struct i2c_adapter *dib7000m_get_i2c_master(struct dvb_frontend *demod,
63 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); 65 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
64 return NULL; 66 return NULL;
65} 67}
68static inline int dib7000m_pid_filter(struct dvb_frontend *fe, u8 id,
69 u16 pid, u8 onoff)
70{
71 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
72 return -ENODEV;
73}
74
75static inline int dib7000m_pid_filter_ctrl(struct dvb_frontend *fe,
76 uint8_t onoff)
77{
78 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
79 return -ENODEV;
80}
66#endif 81#endif
67 82
68/* TODO 83/* TODO
diff --git a/drivers/media/dvb/mantis/mantis_pci.c b/drivers/media/dvb/mantis/mantis_pci.c
index 59feeb84aec7..10a432a79d00 100644
--- a/drivers/media/dvb/mantis/mantis_pci.c
+++ b/drivers/media/dvb/mantis/mantis_pci.c
@@ -22,7 +22,6 @@
22#include <linux/moduleparam.h> 22#include <linux/moduleparam.h>
23#include <linux/kernel.h> 23#include <linux/kernel.h>
24#include <asm/io.h> 24#include <asm/io.h>
25#include <asm/pgtable.h>
26#include <asm/page.h> 25#include <asm/page.h>
27#include <linux/kmod.h> 26#include <linux/kmod.h>
28#include <linux/vmalloc.h> 27#include <linux/vmalloc.h>
diff --git a/drivers/media/rc/ir-raw.c b/drivers/media/rc/ir-raw.c
index 73230ff93b8a..01f258a2a57a 100644
--- a/drivers/media/rc/ir-raw.c
+++ b/drivers/media/rc/ir-raw.c
@@ -112,7 +112,7 @@ int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type)
112{ 112{
113 ktime_t now; 113 ktime_t now;
114 s64 delta; /* ns */ 114 s64 delta; /* ns */
115 struct ir_raw_event ev; 115 DEFINE_IR_RAW_EVENT(ev);
116 int rc = 0; 116 int rc = 0;
117 117
118 if (!dev->raw) 118 if (!dev->raw)
@@ -125,7 +125,6 @@ int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type)
125 * being called for the first time, note that delta can't 125 * being called for the first time, note that delta can't
126 * possibly be negative. 126 * possibly be negative.
127 */ 127 */
128 ev.duration = 0;
129 if (delta > IR_MAX_DURATION || !dev->raw->last_type) 128 if (delta > IR_MAX_DURATION || !dev->raw->last_type)
130 type |= IR_START_EVENT; 129 type |= IR_START_EVENT;
131 else 130 else
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 6df0a4980645..e4f8eac7f717 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -148,6 +148,7 @@ enum mceusb_model_type {
148 MCE_GEN2_TX_INV, 148 MCE_GEN2_TX_INV,
149 POLARIS_EVK, 149 POLARIS_EVK,
150 CX_HYBRID_TV, 150 CX_HYBRID_TV,
151 MULTIFUNCTION,
151}; 152};
152 153
153struct mceusb_model { 154struct mceusb_model {
@@ -155,9 +156,10 @@ struct mceusb_model {
155 u32 mce_gen2:1; 156 u32 mce_gen2:1;
156 u32 mce_gen3:1; 157 u32 mce_gen3:1;
157 u32 tx_mask_normal:1; 158 u32 tx_mask_normal:1;
158 u32 is_polaris:1;
159 u32 no_tx:1; 159 u32 no_tx:1;
160 160
161 int ir_intfnum;
162
161 const char *rc_map; /* Allow specify a per-board map */ 163 const char *rc_map; /* Allow specify a per-board map */
162 const char *name; /* per-board name */ 164 const char *name; /* per-board name */
163}; 165};
@@ -179,7 +181,6 @@ static const struct mceusb_model mceusb_model[] = {
179 .tx_mask_normal = 1, 181 .tx_mask_normal = 1,
180 }, 182 },
181 [POLARIS_EVK] = { 183 [POLARIS_EVK] = {
182 .is_polaris = 1,
183 /* 184 /*
184 * In fact, the EVK is shipped without 185 * In fact, the EVK is shipped without
185 * remotes, but we should have something handy, 186 * remotes, but we should have something handy,
@@ -189,10 +190,13 @@ static const struct mceusb_model mceusb_model[] = {
189 .name = "Conexant Hybrid TV (cx231xx) MCE IR", 190 .name = "Conexant Hybrid TV (cx231xx) MCE IR",
190 }, 191 },
191 [CX_HYBRID_TV] = { 192 [CX_HYBRID_TV] = {
192 .is_polaris = 1,
193 .no_tx = 1, /* tx isn't wired up at all */ 193 .no_tx = 1, /* tx isn't wired up at all */
194 .name = "Conexant Hybrid TV (cx231xx) MCE IR", 194 .name = "Conexant Hybrid TV (cx231xx) MCE IR",
195 }, 195 },
196 [MULTIFUNCTION] = {
197 .mce_gen2 = 1,
198 .ir_intfnum = 2,
199 },
196}; 200};
197 201
198static struct usb_device_id mceusb_dev_table[] = { 202static struct usb_device_id mceusb_dev_table[] = {
@@ -216,8 +220,9 @@ static struct usb_device_id mceusb_dev_table[] = {
216 { USB_DEVICE(VENDOR_PHILIPS, 0x206c) }, 220 { USB_DEVICE(VENDOR_PHILIPS, 0x206c) },
217 /* Philips/Spinel plus IR transceiver for ASUS */ 221 /* Philips/Spinel plus IR transceiver for ASUS */
218 { USB_DEVICE(VENDOR_PHILIPS, 0x2088) }, 222 { USB_DEVICE(VENDOR_PHILIPS, 0x2088) },
219 /* Realtek MCE IR Receiver */ 223 /* Realtek MCE IR Receiver and card reader */
220 { USB_DEVICE(VENDOR_REALTEK, 0x0161) }, 224 { USB_DEVICE(VENDOR_REALTEK, 0x0161),
225 .driver_info = MULTIFUNCTION },
221 /* SMK/Toshiba G83C0004D410 */ 226 /* SMK/Toshiba G83C0004D410 */
222 { USB_DEVICE(VENDOR_SMK, 0x031d), 227 { USB_DEVICE(VENDOR_SMK, 0x031d),
223 .driver_info = MCE_GEN2_TX_INV }, 228 .driver_info = MCE_GEN2_TX_INV },
@@ -1101,7 +1106,7 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
1101 bool is_gen3; 1106 bool is_gen3;
1102 bool is_microsoft_gen1; 1107 bool is_microsoft_gen1;
1103 bool tx_mask_normal; 1108 bool tx_mask_normal;
1104 bool is_polaris; 1109 int ir_intfnum;
1105 1110
1106 dev_dbg(&intf->dev, "%s called\n", __func__); 1111 dev_dbg(&intf->dev, "%s called\n", __func__);
1107 1112
@@ -1110,13 +1115,11 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
1110 is_gen3 = mceusb_model[model].mce_gen3; 1115 is_gen3 = mceusb_model[model].mce_gen3;
1111 is_microsoft_gen1 = mceusb_model[model].mce_gen1; 1116 is_microsoft_gen1 = mceusb_model[model].mce_gen1;
1112 tx_mask_normal = mceusb_model[model].tx_mask_normal; 1117 tx_mask_normal = mceusb_model[model].tx_mask_normal;
1113 is_polaris = mceusb_model[model].is_polaris; 1118 ir_intfnum = mceusb_model[model].ir_intfnum;
1114 1119
1115 if (is_polaris) { 1120 /* There are multi-function devices with non-IR interfaces */
1116 /* Interface 0 is IR */ 1121 if (idesc->desc.bInterfaceNumber != ir_intfnum)
1117 if (idesc->desc.bInterfaceNumber) 1122 return -ENODEV;
1118 return -ENODEV;
1119 }
1120 1123
1121 /* step through the endpoints to find first bulk in and out endpoint */ 1124 /* step through the endpoints to find first bulk in and out endpoint */
1122 for (i = 0; i < idesc->desc.bNumEndpoints; ++i) { 1125 for (i = 0; i < idesc->desc.bNumEndpoints; ++i) {
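Replacing the is_polaris special case with a per-model ir_intfnum generalizes the check: multi-function devices such as the Realtek 0x0161 (IR receiver plus card reader) expose several USB interfaces, and the probe now binds only to the interface whose bInterfaceNumber matches, returning -ENODEV for the others. A standalone sketch of that filter:

#include <stdio.h>
#include <errno.h>

struct model { const char *name; int ir_intfnum; };

static int probe(const struct model *m, int bInterfaceNumber)
{
	/* There are multi-function devices with non-IR interfaces. */
	if (bInterfaceNumber != m->ir_intfnum)
		return -ENODEV;
	printf("%s: bound to interface %d\n", m->name, bInterfaceNumber);
	return 0;
}

int main(void)
{
	struct model rtl  = { "Realtek multifunction", 2 };
	struct model gen2 = { "plain MCE gen2", 0 };

	printf("rtl intf 0 -> %d\n", probe(&rtl, 0));	/* card reader, skipped */
	printf("rtl intf 2 -> %d\n", probe(&rtl, 2));	/* IR, accepted */
	printf("gen2 intf 0 -> %d\n", probe(&gen2, 0));
	return 0;
}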
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index 273d9d674792..d4d64492a057 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -385,8 +385,9 @@ static void nvt_cir_regs_init(struct nvt_dev *nvt)
385 385
386static void nvt_cir_wake_regs_init(struct nvt_dev *nvt) 386static void nvt_cir_wake_regs_init(struct nvt_dev *nvt)
387{ 387{
388 /* set number of bytes needed for wake key comparison (default 67) */ 388 /* set number of bytes needed for wake from s3 (default 65) */
389 nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFO_LEN, CIR_WAKE_FIFO_CMP_DEEP); 389 nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFO_CMP_BYTES,
390 CIR_WAKE_FIFO_CMP_DEEP);
390 391
391 /* set tolerance/variance allowed per byte during wake compare */ 392 /* set tolerance/variance allowed per byte during wake compare */
392 nvt_cir_wake_reg_write(nvt, CIR_WAKE_CMP_TOLERANCE, 393 nvt_cir_wake_reg_write(nvt, CIR_WAKE_CMP_TOLERANCE,
diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h
index 1df82351cb03..048135eea702 100644
--- a/drivers/media/rc/nuvoton-cir.h
+++ b/drivers/media/rc/nuvoton-cir.h
@@ -305,8 +305,11 @@ struct nvt_dev {
305#define CIR_WAKE_IRFIFOSTS_RX_EMPTY 0x20 305#define CIR_WAKE_IRFIFOSTS_RX_EMPTY 0x20
306#define CIR_WAKE_IRFIFOSTS_RX_FULL 0x10 306#define CIR_WAKE_IRFIFOSTS_RX_FULL 0x10
307 307
308/* CIR Wake FIFO buffer is 67 bytes long */ 308/*
309#define CIR_WAKE_FIFO_LEN 67 309 * The CIR Wake FIFO buffer is 67 bytes long, but the stock remote wakes
310 * the system comparing only 65 bytes (fails with this set to 67)
311 */
312#define CIR_WAKE_FIFO_CMP_BYTES 65
310/* CIR Wake byte comparison tolerance */ 313/* CIR Wake byte comparison tolerance */
311#define CIR_WAKE_CMP_TOLERANCE 5 314#define CIR_WAKE_CMP_TOLERANCE 5
312 315
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 512a2f4ada0e..5b4422ef4e6d 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -850,7 +850,7 @@ static ssize_t store_protocols(struct device *device,
850 count++; 850 count++;
851 } else { 851 } else {
852 for (i = 0; i < ARRAY_SIZE(proto_names); i++) { 852 for (i = 0; i < ARRAY_SIZE(proto_names); i++) {
853 if (!strncasecmp(tmp, proto_names[i].name, strlen(proto_names[i].name))) { 853 if (!strcasecmp(tmp, proto_names[i].name)) {
854 tmp += strlen(proto_names[i].name); 854 tmp += strlen(proto_names[i].name);
855 mask = proto_names[i].type; 855 mask = proto_names[i].type;
856 break; 856 break;
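The switch from strncasecmp() to strcasecmp() matters because the old call compared only strlen(proto_names[i].name) characters, so a longer token that merely starts with a shorter table entry would match that shorter entry first and select the wrong protocol. A quick standalone comparison of the two behaviours (the protocol names here are only examples):

#include <stdio.h>
#include <string.h>
#include <strings.h>

int main(void)
{
	const char *names[] = { "rc-5", "rc-5-sz" };	/* example table entries */
	const char *token = "rc-5-sz";			/* what the user wrote */

	for (unsigned i = 0; i < 2; i++) {
		int prefix = !strncasecmp(token, names[i], strlen(names[i]));
		int exact  = !strcasecmp(token, names[i]);
		printf("%-8s prefix-match=%d exact-match=%d\n",
		       names[i], prefix, exact);
	}
	/* The prefix match accepts "rc-5" for the token "rc-5-sz";
	 * the exact match only accepts the intended entry. */
	return 0;
}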
diff --git a/drivers/media/video/au0828/au0828-video.c b/drivers/media/video/au0828/au0828-video.c
index e41e4ad5cc40..9c475c600fc9 100644
--- a/drivers/media/video/au0828/au0828-video.c
+++ b/drivers/media/video/au0828/au0828-video.c
@@ -1758,7 +1758,12 @@ static int vidioc_reqbufs(struct file *file, void *priv,
1758 if (rc < 0) 1758 if (rc < 0)
1759 return rc; 1759 return rc;
1760 1760
1761 return videobuf_reqbufs(&fh->vb_vidq, rb); 1761 if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1762 rc = videobuf_reqbufs(&fh->vb_vidq, rb);
1763 else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
1764 rc = videobuf_reqbufs(&fh->vb_vbiq, rb);
1765
1766 return rc;
1762} 1767}
1763 1768
1764static int vidioc_querybuf(struct file *file, void *priv, 1769static int vidioc_querybuf(struct file *file, void *priv,
@@ -1772,7 +1777,12 @@ static int vidioc_querybuf(struct file *file, void *priv,
1772 if (rc < 0) 1777 if (rc < 0)
1773 return rc; 1778 return rc;
1774 1779
1775 return videobuf_querybuf(&fh->vb_vidq, b); 1780 if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1781 rc = videobuf_querybuf(&fh->vb_vidq, b);
1782 else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
1783 rc = videobuf_querybuf(&fh->vb_vbiq, b);
1784
1785 return rc;
1776} 1786}
1777 1787
1778static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *b) 1788static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
@@ -1785,7 +1795,12 @@ static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
1785 if (rc < 0) 1795 if (rc < 0)
1786 return rc; 1796 return rc;
1787 1797
1788 return videobuf_qbuf(&fh->vb_vidq, b); 1798 if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1799 rc = videobuf_qbuf(&fh->vb_vidq, b);
1800 else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
1801 rc = videobuf_qbuf(&fh->vb_vbiq, b);
1802
1803 return rc;
1789} 1804}
1790 1805
1791static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b) 1806static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
@@ -1806,7 +1821,12 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
1806 dev->greenscreen_detected = 0; 1821 dev->greenscreen_detected = 0;
1807 } 1822 }
1808 1823
1809 return videobuf_dqbuf(&fh->vb_vidq, b, file->f_flags & O_NONBLOCK); 1824 if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1825 rc = videobuf_dqbuf(&fh->vb_vidq, b, file->f_flags & O_NONBLOCK);
1826 else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
1827 rc = videobuf_dqbuf(&fh->vb_vbiq, b, file->f_flags & O_NONBLOCK);
1828
1829 return rc;
1810} 1830}
1811 1831
1812static struct v4l2_file_operations au0828_v4l_fops = { 1832static struct v4l2_file_operations au0828_v4l_fops = {
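The reqbufs/querybuf/qbuf/dqbuf handlers above now pick the videobuf queue from the buffer type, so VBI capture gets its own queue instead of every request being funnelled through the video queue. The selection logic, reduced to a standalone sketch with the two queues represented as strings:

#include <stdio.h>

enum buf_type { VIDEO_CAPTURE, VBI_CAPTURE, OTHER };

static const char *pick_queue(enum buf_type type)
{
	if (type == VIDEO_CAPTURE)
		return "vb_vidq";	/* video queue */
	if (type == VBI_CAPTURE)
		return "vb_vbiq";	/* VBI queue */
	return NULL;			/* in the driver, rc keeps its prior value */
}

int main(void)
{
	printf("video -> %s\n", pick_queue(VIDEO_CAPTURE));
	printf("vbi   -> %s\n", pick_queue(VBI_CAPTURE));
	printf("other -> %s\n", pick_queue(OTHER) ? pick_queue(OTHER) : "(none)");
	return 0;
}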
diff --git a/drivers/media/video/cx18/cx18-cards.c b/drivers/media/video/cx18/cx18-cards.c
index 87177733cf92..68ad1963f421 100644
--- a/drivers/media/video/cx18/cx18-cards.c
+++ b/drivers/media/video/cx18/cx18-cards.c
@@ -95,6 +95,53 @@ static const struct cx18_card cx18_card_hvr1600_esmt = {
95 .i2c = &cx18_i2c_std, 95 .i2c = &cx18_i2c_std,
96}; 96};
97 97
98static const struct cx18_card cx18_card_hvr1600_s5h1411 = {
99 .type = CX18_CARD_HVR_1600_S5H1411,
100 .name = "Hauppauge HVR-1600",
101 .comment = "Simultaneous Digital and Analog TV capture supported\n",
102 .v4l2_capabilities = CX18_CAP_ENCODER,
103 .hw_audio_ctrl = CX18_HW_418_AV,
104 .hw_muxer = CX18_HW_CS5345,
105 .hw_all = CX18_HW_TVEEPROM | CX18_HW_418_AV | CX18_HW_TUNER |
106 CX18_HW_CS5345 | CX18_HW_DVB | CX18_HW_GPIO_RESET_CTRL |
107 CX18_HW_Z8F0811_IR_HAUP,
108 .video_inputs = {
109 { CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE7 },
110 { CX18_CARD_INPUT_SVIDEO1, 1, CX18_AV_SVIDEO1 },
111 { CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE3 },
112 { CX18_CARD_INPUT_SVIDEO2, 2, CX18_AV_SVIDEO2 },
113 { CX18_CARD_INPUT_COMPOSITE2, 2, CX18_AV_COMPOSITE4 },
114 },
115 .audio_inputs = {
116 { CX18_CARD_INPUT_AUD_TUNER,
117 CX18_AV_AUDIO8, CS5345_IN_1 | CS5345_MCLK_1_5 },
118 { CX18_CARD_INPUT_LINE_IN1,
119 CX18_AV_AUDIO_SERIAL1, CS5345_IN_2 },
120 { CX18_CARD_INPUT_LINE_IN2,
121 CX18_AV_AUDIO_SERIAL1, CS5345_IN_3 },
122 },
123 .radio_input = { CX18_CARD_INPUT_AUD_TUNER,
124 CX18_AV_AUDIO_SERIAL1, CS5345_IN_4 },
125 .ddr = {
126 /* ESMT M13S128324A-5B memory */
127 .chip_config = 0x003,
128 .refresh = 0x30c,
129 .timing1 = 0x44220e82,
130 .timing2 = 0x08,
131 .tune_lane = 0,
132 .initial_emrs = 0,
133 },
134 .gpio_init.initial_value = 0x3001,
135 .gpio_init.direction = 0x3001,
136 .gpio_i2c_slave_reset = {
137 .active_lo_mask = 0x3001,
138 .msecs_asserted = 10,
139 .msecs_recovery = 40,
140 .ir_reset_mask = 0x0001,
141 },
142 .i2c = &cx18_i2c_std,
143};
144
98static const struct cx18_card cx18_card_hvr1600_samsung = { 145static const struct cx18_card cx18_card_hvr1600_samsung = {
99 .type = CX18_CARD_HVR_1600_SAMSUNG, 146 .type = CX18_CARD_HVR_1600_SAMSUNG,
100 .name = "Hauppauge HVR-1600 (Preproduction)", 147 .name = "Hauppauge HVR-1600 (Preproduction)",
@@ -523,7 +570,8 @@ static const struct cx18_card *cx18_card_list[] = {
523 &cx18_card_toshiba_qosmio_dvbt, 570 &cx18_card_toshiba_qosmio_dvbt,
524 &cx18_card_leadtek_pvr2100, 571 &cx18_card_leadtek_pvr2100,
525 &cx18_card_leadtek_dvr3100h, 572 &cx18_card_leadtek_dvr3100h,
526 &cx18_card_gotview_dvd3 573 &cx18_card_gotview_dvd3,
574 &cx18_card_hvr1600_s5h1411
527}; 575};
528 576
529const struct cx18_card *cx18_get_card(u16 index) 577const struct cx18_card *cx18_get_card(u16 index)
diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
index 944af8adbe0c..b1c3cbd92743 100644
--- a/drivers/media/video/cx18/cx18-driver.c
+++ b/drivers/media/video/cx18/cx18-driver.c
@@ -157,6 +157,7 @@ MODULE_PARM_DESC(cardtype,
157 "\t\t\t 7 = Leadtek WinFast PVR2100\n" 157 "\t\t\t 7 = Leadtek WinFast PVR2100\n"
158 "\t\t\t 8 = Leadtek WinFast DVR3100 H\n" 158 "\t\t\t 8 = Leadtek WinFast DVR3100 H\n"
159 "\t\t\t 9 = GoTView PCI DVD3 Hybrid\n" 159 "\t\t\t 9 = GoTView PCI DVD3 Hybrid\n"
160 "\t\t\t 10 = Hauppauge HVR 1600 (S5H1411)\n"
160 "\t\t\t 0 = Autodetect (default)\n" 161 "\t\t\t 0 = Autodetect (default)\n"
161 "\t\t\t-1 = Ignore this card\n\t\t"); 162 "\t\t\t-1 = Ignore this card\n\t\t");
162MODULE_PARM_DESC(pal, "Set PAL standard: B, G, H, D, K, I, M, N, Nc, 60"); 163MODULE_PARM_DESC(pal, "Set PAL standard: B, G, H, D, K, I, M, N, Nc, 60");
@@ -337,6 +338,7 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
337 switch (cx->card->type) { 338 switch (cx->card->type) {
338 case CX18_CARD_HVR_1600_ESMT: 339 case CX18_CARD_HVR_1600_ESMT:
339 case CX18_CARD_HVR_1600_SAMSUNG: 340 case CX18_CARD_HVR_1600_SAMSUNG:
341 case CX18_CARD_HVR_1600_S5H1411:
340 tveeprom_hauppauge_analog(&c, tv, eedata); 342 tveeprom_hauppauge_analog(&c, tv, eedata);
341 break; 343 break;
342 case CX18_CARD_YUAN_MPC718: 344 case CX18_CARD_YUAN_MPC718:
@@ -365,7 +367,25 @@ static void cx18_process_eeprom(struct cx18 *cx)
365 from the model number. Use the cardtype module option if you 367 from the model number. Use the cardtype module option if you
366 have one of these preproduction models. */ 368 have one of these preproduction models. */
367 switch (tv.model) { 369 switch (tv.model) {
368 case 74000 ... 74999: 370 case 74301: /* Retail models */
371 case 74321:
372 case 74351: /* OEM models */
373 case 74361:
374 /* Digital side is s5h1411/tda18271 */
375 cx->card = cx18_get_card(CX18_CARD_HVR_1600_S5H1411);
376 break;
377 case 74021: /* Retail models */
378 case 74031:
379 case 74041:
380 case 74141:
381 case 74541: /* OEM models */
382 case 74551:
383 case 74591:
384 case 74651:
385 case 74691:
386 case 74751:
387 case 74891:
388 /* Digital side is s5h1409/mxl5005s */
369 cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT); 389 cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT);
370 break; 390 break;
371 case 0x718: 391 case 0x718:
@@ -377,7 +397,8 @@ static void cx18_process_eeprom(struct cx18 *cx)
377 CX18_ERR("Invalid EEPROM\n"); 397 CX18_ERR("Invalid EEPROM\n");
378 return; 398 return;
379 default: 399 default:
380 CX18_ERR("Unknown model %d, defaulting to HVR-1600\n", tv.model); 400 CX18_ERR("Unknown model %d, defaulting to original HVR-1600 "
401 "(cardtype=1)\n", tv.model);
381 cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT); 402 cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT);
382 break; 403 break;
383 } 404 }
diff --git a/drivers/media/video/cx18/cx18-driver.h b/drivers/media/video/cx18/cx18-driver.h
index 306caac6d3fc..f736679d2517 100644
--- a/drivers/media/video/cx18/cx18-driver.h
+++ b/drivers/media/video/cx18/cx18-driver.h
@@ -85,7 +85,8 @@
85#define CX18_CARD_LEADTEK_PVR2100 6 /* Leadtek WinFast PVR2100 */ 85#define CX18_CARD_LEADTEK_PVR2100 6 /* Leadtek WinFast PVR2100 */
86#define CX18_CARD_LEADTEK_DVR3100H 7 /* Leadtek WinFast DVR3100 H */ 86#define CX18_CARD_LEADTEK_DVR3100H 7 /* Leadtek WinFast DVR3100 H */
87#define CX18_CARD_GOTVIEW_PCI_DVD3 8 /* GoTView PCI DVD3 Hybrid */ 87#define CX18_CARD_GOTVIEW_PCI_DVD3 8 /* GoTView PCI DVD3 Hybrid */
88#define CX18_CARD_LAST 8 88#define CX18_CARD_HVR_1600_S5H1411 9 /* Hauppauge HVR 1600 s5h1411/tda18271*/
89#define CX18_CARD_LAST 9
89 90
90#define CX18_ENC_STREAM_TYPE_MPG 0 91#define CX18_ENC_STREAM_TYPE_MPG 0
91#define CX18_ENC_STREAM_TYPE_TS 1 92#define CX18_ENC_STREAM_TYPE_TS 1
diff --git a/drivers/media/video/cx18/cx18-dvb.c b/drivers/media/video/cx18/cx18-dvb.c
index f0381d62518d..f41922bd4020 100644
--- a/drivers/media/video/cx18/cx18-dvb.c
+++ b/drivers/media/video/cx18/cx18-dvb.c
@@ -29,6 +29,8 @@
29#include "cx18-gpio.h" 29#include "cx18-gpio.h"
30#include "s5h1409.h" 30#include "s5h1409.h"
31#include "mxl5005s.h" 31#include "mxl5005s.h"
32#include "s5h1411.h"
33#include "tda18271.h"
32#include "zl10353.h" 34#include "zl10353.h"
33 35
34#include <linux/firmware.h> 36#include <linux/firmware.h>
@@ -77,6 +79,32 @@ static struct s5h1409_config hauppauge_hvr1600_config = {
77}; 79};
78 80
79/* 81/*
82 * CX18_CARD_HVR_1600_S5H1411
83 */
84static struct s5h1411_config hcw_s5h1411_config = {
85 .output_mode = S5H1411_SERIAL_OUTPUT,
86 .gpio = S5H1411_GPIO_OFF,
87 .vsb_if = S5H1411_IF_44000,
88 .qam_if = S5H1411_IF_4000,
89 .inversion = S5H1411_INVERSION_ON,
90 .status_mode = S5H1411_DEMODLOCKING,
91 .mpeg_timing = S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK,
92};
93
94static struct tda18271_std_map hauppauge_tda18271_std_map = {
95 .atsc_6 = { .if_freq = 5380, .agc_mode = 3, .std = 3,
96 .if_lvl = 6, .rfagc_top = 0x37 },
97 .qam_6 = { .if_freq = 4000, .agc_mode = 3, .std = 0,
98 .if_lvl = 6, .rfagc_top = 0x37 },
99};
100
101static struct tda18271_config hauppauge_tda18271_config = {
102 .std_map = &hauppauge_tda18271_std_map,
103 .gate = TDA18271_GATE_DIGITAL,
104 .output_opt = TDA18271_OUTPUT_LT_OFF,
105};
106
107/*
80 * CX18_CARD_LEADTEK_DVR3100H 108 * CX18_CARD_LEADTEK_DVR3100H
81 */ 109 */
82/* Information/confirmation of proper config values provided by Terry Wu */ 110/* Information/confirmation of proper config values provided by Terry Wu */
@@ -244,6 +272,7 @@ static int cx18_dvb_start_feed(struct dvb_demux_feed *feed)
244 switch (cx->card->type) { 272 switch (cx->card->type) {
245 case CX18_CARD_HVR_1600_ESMT: 273 case CX18_CARD_HVR_1600_ESMT:
246 case CX18_CARD_HVR_1600_SAMSUNG: 274 case CX18_CARD_HVR_1600_SAMSUNG:
275 case CX18_CARD_HVR_1600_S5H1411:
247 v = cx18_read_reg(cx, CX18_REG_DMUX_NUM_PORT_0_CONTROL); 276 v = cx18_read_reg(cx, CX18_REG_DMUX_NUM_PORT_0_CONTROL);
248 v |= 0x00400000; /* Serial Mode */ 277 v |= 0x00400000; /* Serial Mode */
249 v |= 0x00002000; /* Data Length - Byte */ 278 v |= 0x00002000; /* Data Length - Byte */
@@ -455,6 +484,15 @@ static int dvb_register(struct cx18_stream *stream)
455 ret = 0; 484 ret = 0;
456 } 485 }
457 break; 486 break;
487 case CX18_CARD_HVR_1600_S5H1411:
488 dvb->fe = dvb_attach(s5h1411_attach,
489 &hcw_s5h1411_config,
490 &cx->i2c_adap[0]);
491 if (dvb->fe != NULL)
492 dvb_attach(tda18271_attach, dvb->fe,
493 0x60, &cx->i2c_adap[0],
494 &hauppauge_tda18271_config);
495 break;
458 case CX18_CARD_LEADTEK_DVR3100H: 496 case CX18_CARD_LEADTEK_DVR3100H:
459 dvb->fe = dvb_attach(zl10353_attach, 497 dvb->fe = dvb_attach(zl10353_attach,
460 &leadtek_dvr3100h_demod, 498 &leadtek_dvr3100h_demod,
diff --git a/drivers/media/video/cx23885/cx23885-i2c.c b/drivers/media/video/cx23885/cx23885-i2c.c
index ed3d8f55029b..307ff543c254 100644
--- a/drivers/media/video/cx23885/cx23885-i2c.c
+++ b/drivers/media/video/cx23885/cx23885-i2c.c
@@ -122,10 +122,6 @@ static int i2c_sendbytes(struct i2c_adapter *i2c_adap,
122 122
123 if (!i2c_wait_done(i2c_adap)) 123 if (!i2c_wait_done(i2c_adap))
124 goto eio; 124 goto eio;
125 if (!i2c_slave_did_ack(i2c_adap)) {
126 retval = -ENXIO;
127 goto err;
128 }
129 if (i2c_debug) { 125 if (i2c_debug) {
130 printk(" <W %02x %02x", msg->addr << 1, msg->buf[0]); 126 printk(" <W %02x %02x", msg->addr << 1, msg->buf[0]);
131 if (!(ctrl & I2C_NOSTOP)) 127 if (!(ctrl & I2C_NOSTOP))
@@ -158,7 +154,6 @@ static int i2c_sendbytes(struct i2c_adapter *i2c_adap,
158 154
159 eio: 155 eio:
160 retval = -EIO; 156 retval = -EIO;
161 err:
162 if (i2c_debug) 157 if (i2c_debug)
163 printk(KERN_ERR " ERR: %d\n", retval); 158 printk(KERN_ERR " ERR: %d\n", retval);
164 return retval; 159 return retval;
@@ -209,10 +204,6 @@ static int i2c_readbytes(struct i2c_adapter *i2c_adap,
209 204
210 if (!i2c_wait_done(i2c_adap)) 205 if (!i2c_wait_done(i2c_adap))
211 goto eio; 206 goto eio;
212 if (cnt == 0 && !i2c_slave_did_ack(i2c_adap)) {
213 retval = -ENXIO;
214 goto err;
215 }
216 msg->buf[cnt] = cx_read(bus->reg_rdata) & 0xff; 207 msg->buf[cnt] = cx_read(bus->reg_rdata) & 0xff;
217 if (i2c_debug) { 208 if (i2c_debug) {
218 dprintk(1, " %02x", msg->buf[cnt]); 209 dprintk(1, " %02x", msg->buf[cnt]);
@@ -224,7 +215,6 @@ static int i2c_readbytes(struct i2c_adapter *i2c_adap,
224 215
225 eio: 216 eio:
226 retval = -EIO; 217 retval = -EIO;
227 err:
228 if (i2c_debug) 218 if (i2c_debug)
229 printk(KERN_ERR " ERR: %d\n", retval); 219 printk(KERN_ERR " ERR: %d\n", retval);
230 return retval; 220 return retval;
diff --git a/drivers/media/video/cx25840/cx25840-core.c b/drivers/media/video/cx25840/cx25840-core.c
index 6fc09dd41b9d..35796e035247 100644
--- a/drivers/media/video/cx25840/cx25840-core.c
+++ b/drivers/media/video/cx25840/cx25840-core.c
@@ -2015,7 +2015,8 @@ static int cx25840_probe(struct i2c_client *client,
2015 kfree(state); 2015 kfree(state);
2016 return err; 2016 return err;
2017 } 2017 }
2018 v4l2_ctrl_cluster(2, &state->volume); 2018 if (!is_cx2583x(state))
2019 v4l2_ctrl_cluster(2, &state->volume);
2019 v4l2_ctrl_handler_setup(&state->hdl); 2020 v4l2_ctrl_handler_setup(&state->hdl);
2020 2021
2021 if (client->dev.platform_data) { 2022 if (client->dev.platform_data) {
diff --git a/drivers/media/video/ivtv/ivtv-irq.c b/drivers/media/video/ivtv/ivtv-irq.c
index 9b4faf009196..9c29e964d400 100644
--- a/drivers/media/video/ivtv/ivtv-irq.c
+++ b/drivers/media/video/ivtv/ivtv-irq.c
@@ -628,22 +628,66 @@ static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
628static void ivtv_irq_dma_err(struct ivtv *itv) 628static void ivtv_irq_dma_err(struct ivtv *itv)
629{ 629{
630 u32 data[CX2341X_MBOX_MAX_DATA]; 630 u32 data[CX2341X_MBOX_MAX_DATA];
631 u32 status;
631 632
632 del_timer(&itv->dma_timer); 633 del_timer(&itv->dma_timer);
634
633 ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data); 635 ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data);
636 status = read_reg(IVTV_REG_DMASTATUS);
634 IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1], 637 IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
635 read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream); 638 status, itv->cur_dma_stream);
636 write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS); 639 /*
640 * We do *not* write back to the IVTV_REG_DMASTATUS register to
641 * clear the error status, if either the encoder write (0x02) or
 642 * decoder read (0x01) bus master DMA operation does not indicate
 643 * completion. We can race with the DMA engine, which may have
644 * transitioned to completed status *after* we read the register.
645 * Setting a IVTV_REG_DMASTATUS flag back to "busy" status, after the
646 * DMA engine has completed, will cause the DMA engine to stop working.
647 */
648 status &= 0x3;
649 if (status == 0x3)
650 write_reg(status, IVTV_REG_DMASTATUS);
651
637 if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && 652 if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
638 itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) { 653 itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
639 struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream]; 654 struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];
640 655
641 /* retry */ 656 if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) {
642 if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) 657 /* retry */
658 /*
659 * FIXME - handle cases of DMA error similar to
660 * encoder below, except conditioned on status & 0x1
661 */
643 ivtv_dma_dec_start(s); 662 ivtv_dma_dec_start(s);
644 else 663 return;
645 ivtv_dma_enc_start(s); 664 } else {
646 return; 665 if ((status & 0x2) == 0) {
666 /*
667 * CX2341x Bus Master DMA write is ongoing.
668 * Reset the timer and let it complete.
669 */
670 itv->dma_timer.expires =
671 jiffies + msecs_to_jiffies(600);
672 add_timer(&itv->dma_timer);
673 return;
674 }
675
676 if (itv->dma_retries < 3) {
677 /*
678 * CX2341x Bus Master DMA write has ended.
679 * Retry the write, starting with the first
680 * xfer segment. Just retrying the current
681 * segment is not sufficient.
682 */
683 s->sg_processed = 0;
684 itv->dma_retries++;
685 ivtv_dma_enc_start_xfer(s);
686 return;
687 }
688 /* Too many retries, give up on this one */
689 }
690
647 } 691 }
648 if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) { 692 if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
649 ivtv_udma_start(itv); 693 ivtv_udma_start(itv);
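The reworked ivtv_irq_dma_err() above only clears IVTV_REG_DMASTATUS when both busy bits report done, lets an ongoing encoder write finish by re-arming the timer, and retries the whole transfer at most three times before giving up. A simplified sketch of that decision logic (userspace, register access stubbed out, constants assumed):

#include <stdio.h>

#define DMA_READ_DONE  0x1  /* decoder read DMA completed */
#define DMA_WRITE_DONE 0x2  /* encoder write DMA completed */

enum action { ACTION_WAIT, ACTION_RETRY, ACTION_GIVE_UP };

/* Decide what to do after an encoder DMA error, given the status bits
 * and the number of retries already attempted: wait if the write is
 * still busy, retry (from the first segment) up to three times,
 * otherwise give up. */
static enum action handle_enc_dma_err(unsigned status, int *retries)
{
	if (!(status & DMA_WRITE_DONE))
		return ACTION_WAIT;        /* bus-master write still ongoing */
	if (*retries < 3) {
		(*retries)++;
		return ACTION_RETRY;       /* restart from the first xfer segment */
	}
	return ACTION_GIVE_UP;
}

int main(void)
{
	int retries = 0;
	const char *names[] = { "wait", "retry", "give up" };

	printf("%s\n", names[handle_enc_dma_err(0x1, &retries)]); /* wait */
	printf("%s\n", names[handle_enc_dma_err(0x3, &retries)]); /* retry */
	return 0;
}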
diff --git a/drivers/media/video/mem2mem_testdev.c b/drivers/media/video/mem2mem_testdev.c
index c179041d91f8..e7e717800ee2 100644
--- a/drivers/media/video/mem2mem_testdev.c
+++ b/drivers/media/video/mem2mem_testdev.c
@@ -1011,7 +1011,6 @@ static int m2mtest_remove(struct platform_device *pdev)
1011 v4l2_m2m_release(dev->m2m_dev); 1011 v4l2_m2m_release(dev->m2m_dev);
1012 del_timer_sync(&dev->timer); 1012 del_timer_sync(&dev->timer);
1013 video_unregister_device(dev->vfd); 1013 video_unregister_device(dev->vfd);
1014 video_device_release(dev->vfd);
1015 v4l2_device_unregister(&dev->v4l2_dev); 1014 v4l2_device_unregister(&dev->v4l2_dev);
1016 kfree(dev); 1015 kfree(dev);
1017 1016
diff --git a/drivers/media/video/s2255drv.c b/drivers/media/video/s2255drv.c
index b63f8cafa671..561909b65ce6 100644
--- a/drivers/media/video/s2255drv.c
+++ b/drivers/media/video/s2255drv.c
@@ -57,7 +57,7 @@
57#include <linux/usb.h> 57#include <linux/usb.h>
58 58
59#define S2255_MAJOR_VERSION 1 59#define S2255_MAJOR_VERSION 1
60#define S2255_MINOR_VERSION 20 60#define S2255_MINOR_VERSION 21
61#define S2255_RELEASE 0 61#define S2255_RELEASE 0
62#define S2255_VERSION KERNEL_VERSION(S2255_MAJOR_VERSION, \ 62#define S2255_VERSION KERNEL_VERSION(S2255_MAJOR_VERSION, \
63 S2255_MINOR_VERSION, \ 63 S2255_MINOR_VERSION, \
@@ -312,9 +312,9 @@ struct s2255_fh {
312}; 312};
313 313
314/* current cypress EEPROM firmware version */ 314/* current cypress EEPROM firmware version */
315#define S2255_CUR_USB_FWVER ((3 << 8) | 6) 315#define S2255_CUR_USB_FWVER ((3 << 8) | 11)
316/* current DSP FW version */ 316/* current DSP FW version */
317#define S2255_CUR_DSP_FWVER 8 317#define S2255_CUR_DSP_FWVER 10102
318/* Need DSP version 5+ for video status feature */ 318/* Need DSP version 5+ for video status feature */
319#define S2255_MIN_DSP_STATUS 5 319#define S2255_MIN_DSP_STATUS 5
320#define S2255_MIN_DSP_COLORFILTER 8 320#define S2255_MIN_DSP_COLORFILTER 8
@@ -492,9 +492,11 @@ static void planar422p_to_yuv_packed(const unsigned char *in,
492 492
493static void s2255_reset_dsppower(struct s2255_dev *dev) 493static void s2255_reset_dsppower(struct s2255_dev *dev)
494{ 494{
495 s2255_vendor_req(dev, 0x40, 0x0b0b, 0x0b0b, NULL, 0, 1); 495 s2255_vendor_req(dev, 0x40, 0x0b0b, 0x0b01, NULL, 0, 1);
496 msleep(10); 496 msleep(10);
497 s2255_vendor_req(dev, 0x50, 0x0000, 0x0000, NULL, 0, 1); 497 s2255_vendor_req(dev, 0x50, 0x0000, 0x0000, NULL, 0, 1);
498 msleep(600);
499 s2255_vendor_req(dev, 0x10, 0x0000, 0x0000, NULL, 0, 1);
498 return; 500 return;
499} 501}
500 502
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 6a1f94042612..c45e6305b26f 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -143,9 +143,9 @@ static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc)
143 unsigned long flags; 143 unsigned long flags;
144 struct asic3 *asic; 144 struct asic3 *asic;
145 145
146 desc->chip->ack(irq); 146 desc->irq_data.chip->irq_ack(&desc->irq_data);
147 147
148 asic = desc->handler_data; 148 asic = get_irq_data(irq);
149 149
150 for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) { 150 for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) {
151 u32 status; 151 u32 status;
diff --git a/drivers/mfd/davinci_voicecodec.c b/drivers/mfd/davinci_voicecodec.c
index 33c923d215c7..fdd8a1b8bc67 100644
--- a/drivers/mfd/davinci_voicecodec.c
+++ b/drivers/mfd/davinci_voicecodec.c
@@ -118,12 +118,12 @@ static int __init davinci_vc_probe(struct platform_device *pdev)
118 118
119 /* Voice codec interface client */ 119 /* Voice codec interface client */
120 cell = &davinci_vc->cells[DAVINCI_VC_VCIF_CELL]; 120 cell = &davinci_vc->cells[DAVINCI_VC_VCIF_CELL];
121 cell->name = "davinci_vcif"; 121 cell->name = "davinci-vcif";
122 cell->driver_data = davinci_vc; 122 cell->driver_data = davinci_vc;
123 123
124 /* Voice codec CQ93VC client */ 124 /* Voice codec CQ93VC client */
125 cell = &davinci_vc->cells[DAVINCI_VC_CQ93VC_CELL]; 125 cell = &davinci_vc->cells[DAVINCI_VC_CQ93VC_CELL];
126 cell->name = "cq93vc"; 126 cell->name = "cq93vc-codec";
127 cell->driver_data = davinci_vc; 127 cell->driver_data = davinci_vc;
128 128
129 ret = mfd_add_devices(&pdev->dev, pdev->id, davinci_vc->cells, 129 ret = mfd_add_devices(&pdev->dev, pdev->id, davinci_vc->cells,
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index 627cf577b16d..e9018d1394ee 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -150,12 +150,12 @@ static inline int __tps6586x_write(struct i2c_client *client,
150static inline int __tps6586x_writes(struct i2c_client *client, int reg, 150static inline int __tps6586x_writes(struct i2c_client *client, int reg,
151 int len, uint8_t *val) 151 int len, uint8_t *val)
152{ 152{
153 int ret; 153 int ret, i;
154 154
155 ret = i2c_smbus_write_i2c_block_data(client, reg, len, val); 155 for (i = 0; i < len; i++) {
156 if (ret < 0) { 156 ret = __tps6586x_write(client, reg + i, *(val + i));
157 dev_err(&client->dev, "failed writings to 0x%02x\n", reg); 157 if (ret < 0)
158 return ret; 158 return ret;
159 } 159 }
160 160
161 return 0; 161 return 0;
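The tps6586x change above replaces a single i2c_smbus_write_i2c_block_data() call with a loop of single-register writes, presumably because the device does not handle block writes reliably. A hedged sketch of the same fallback pattern with the register write stubbed out:

#include <stdio.h>
#include <stdint.h>

/* Stub standing in for a single-register I2C write; returns 0 on success. */
static int reg_write(int reg, uint8_t val)
{
	printf("write reg 0x%02x = 0x%02x\n", reg, val);
	return 0;
}

/* Emulate a block write as len consecutive single-register writes,
 * stopping at the first failure - the shape of __tps6586x_writes()
 * after the patch. */
static int reg_writes(int reg, int len, const uint8_t *val)
{
	int ret, i;

	for (i = 0; i < len; i++) {
		ret = reg_write(reg + i, val[i]);
		if (ret < 0)
			return ret;
	}
	return 0;
}

int main(void)
{
	uint8_t buf[3] = { 0x10, 0x20, 0x30 };

	return reg_writes(0x40, 3, buf);
}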
diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c
index 000cb414a78a..92b85e28a15e 100644
--- a/drivers/mfd/ucb1x00-ts.c
+++ b/drivers/mfd/ucb1x00-ts.c
@@ -385,12 +385,18 @@ static int ucb1x00_ts_add(struct ucb1x00_dev *dev)
385 idev->close = ucb1x00_ts_close; 385 idev->close = ucb1x00_ts_close;
386 386
387 __set_bit(EV_ABS, idev->evbit); 387 __set_bit(EV_ABS, idev->evbit);
388 __set_bit(ABS_X, idev->absbit);
389 __set_bit(ABS_Y, idev->absbit);
390 __set_bit(ABS_PRESSURE, idev->absbit);
391 388
392 input_set_drvdata(idev, ts); 389 input_set_drvdata(idev, ts);
393 390
391 ucb1x00_adc_enable(ts->ucb);
392 ts->x_res = ucb1x00_ts_read_xres(ts);
393 ts->y_res = ucb1x00_ts_read_yres(ts);
394 ucb1x00_adc_disable(ts->ucb);
395
396 input_set_abs_params(idev, ABS_X, 0, ts->x_res, 0, 0);
397 input_set_abs_params(idev, ABS_Y, 0, ts->y_res, 0, 0);
398 input_set_abs_params(idev, ABS_PRESSURE, 0, 0, 0, 0);
399
394 err = input_register_device(idev); 400 err = input_register_device(idev);
395 if (err) 401 if (err)
396 goto fail; 402 goto fail;
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 41233c7fa581..f4016a075fd6 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -246,6 +246,16 @@ static int wm8994_suspend(struct device *dev)
246 struct wm8994 *wm8994 = dev_get_drvdata(dev); 246 struct wm8994 *wm8994 = dev_get_drvdata(dev);
247 int ret; 247 int ret;
248 248
249 /* Don't actually go through with the suspend if the CODEC is
 250 * still active (e.g., for audio passthrough from CP). */
251 ret = wm8994_reg_read(wm8994, WM8994_POWER_MANAGEMENT_1);
252 if (ret < 0) {
253 dev_err(dev, "Failed to read power status: %d\n", ret);
254 } else if (ret & WM8994_VMID_SEL_MASK) {
255 dev_dbg(dev, "CODEC still active, ignoring suspend\n");
256 return 0;
257 }
258
249 /* GPIO configuration state is saved here since we may be configuring 259 /* GPIO configuration state is saved here since we may be configuring
250 * the GPIO alternate functions even if we're not using the gpiolib 260 * the GPIO alternate functions even if we're not using the gpiolib
251 * driver for them. 261 * driver for them.
@@ -261,6 +271,8 @@ static int wm8994_suspend(struct device *dev)
261 if (ret < 0) 271 if (ret < 0)
262 dev_err(dev, "Failed to save LDO registers: %d\n", ret); 272 dev_err(dev, "Failed to save LDO registers: %d\n", ret);
263 273
274 wm8994->suspended = true;
275
264 ret = regulator_bulk_disable(wm8994->num_supplies, 276 ret = regulator_bulk_disable(wm8994->num_supplies,
265 wm8994->supplies); 277 wm8994->supplies);
266 if (ret != 0) { 278 if (ret != 0) {
@@ -276,6 +288,10 @@ static int wm8994_resume(struct device *dev)
276 struct wm8994 *wm8994 = dev_get_drvdata(dev); 288 struct wm8994 *wm8994 = dev_get_drvdata(dev);
277 int ret; 289 int ret;
278 290
291 /* We may have lied to the PM core about suspending */
292 if (!wm8994->suspended)
293 return 0;
294
279 ret = regulator_bulk_enable(wm8994->num_supplies, 295 ret = regulator_bulk_enable(wm8994->num_supplies,
280 wm8994->supplies); 296 wm8994->supplies);
281 if (ret != 0) { 297 if (ret != 0) {
@@ -298,6 +314,8 @@ static int wm8994_resume(struct device *dev)
298 if (ret < 0) 314 if (ret < 0)
299 dev_err(dev, "Failed to restore GPIO registers: %d\n", ret); 315 dev_err(dev, "Failed to restore GPIO registers: %d\n", ret);
300 316
317 wm8994->suspended = false;
318
301 return 0; 319 return 0;
302} 320}
303#endif 321#endif
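The wm8994 suspend/resume hunks skip the suspend when VMID is still enabled (the CODEC is in use) and record a suspended flag so the matching resume can be skipped too. A small sketch of that pattern, with the power-status register read replaced by a plain field and a made-up mask value:

#include <stdio.h>
#include <stdbool.h>

#define VMID_SEL_MASK 0x6   /* hypothetical mask for the VMID enable bits */

struct chip {
	bool suspended;
	int power_reg;          /* stand-in for the power management register */
};

static int chip_suspend(struct chip *c)
{
	if (c->power_reg & VMID_SEL_MASK)
		return 0;       /* CODEC still active: pretend to suspend */
	c->suspended = true;
	printf("really suspended\n");
	return 0;
}

static int chip_resume(struct chip *c)
{
	if (!c->suspended)
		return 0;       /* we never actually suspended */
	c->suspended = false;
	printf("really resumed\n");
	return 0;
}

int main(void)
{
	struct chip c = { .suspended = false, .power_reg = VMID_SEL_MASK };

	chip_suspend(&c);       /* skipped: codec active */
	chip_resume(&c);        /* skipped: nothing to undo */
	c.power_reg = 0;
	chip_suspend(&c);       /* real suspend */
	chip_resume(&c);        /* real resume */
	return 0;
}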
diff --git a/drivers/misc/bmp085.c b/drivers/misc/bmp085.c
index 63ee4c1a5315..b6e1c9a6679e 100644
--- a/drivers/misc/bmp085.c
+++ b/drivers/misc/bmp085.c
@@ -449,6 +449,7 @@ static const struct i2c_device_id bmp085_id[] = {
449 { "bmp085", 0 }, 449 { "bmp085", 0 },
450 { } 450 { }
451}; 451};
452MODULE_DEVICE_TABLE(i2c, bmp085_id);
452 453
453static struct i2c_driver bmp085_driver = { 454static struct i2c_driver bmp085_driver = {
454 .driver = { 455 .driver = {
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 6625c057be05..150b5f3cd401 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1529,7 +1529,7 @@ void mmc_rescan(struct work_struct *work)
1529 * still present 1529 * still present
1530 */ 1530 */
1531 if (host->bus_ops && host->bus_ops->detect && !host->bus_dead 1531 if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
1532 && mmc_card_is_removable(host)) 1532 && !(host->caps & MMC_CAP_NONREMOVABLE))
1533 host->bus_ops->detect(host); 1533 host->bus_ops->detect(host);
1534 1534
1535 /* 1535 /*
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 5c4a54d9b6a4..ebc62ad4cc56 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -792,7 +792,6 @@ int mmc_attach_sdio(struct mmc_host *host)
792 */ 792 */
793 mmc_release_host(host); 793 mmc_release_host(host);
794 err = mmc_add_card(host->card); 794 err = mmc_add_card(host->card);
795 mmc_claim_host(host);
796 if (err) 795 if (err)
797 goto remove_added; 796 goto remove_added;
798 797
@@ -805,12 +804,12 @@ int mmc_attach_sdio(struct mmc_host *host)
805 goto remove_added; 804 goto remove_added;
806 } 805 }
807 806
807 mmc_claim_host(host);
808 return 0; 808 return 0;
809 809
810 810
811remove_added: 811remove_added:
812 /* Remove without lock if the device has been added. */ 812 /* Remove without lock if the device has been added. */
813 mmc_release_host(host);
814 mmc_sdio_remove(host); 813 mmc_sdio_remove(host);
815 mmc_claim_host(host); 814 mmc_claim_host(host);
816remove: 815remove:
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index a8c3e1c9b02a..4aaa88f8ab5f 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -1230,10 +1230,32 @@ static int inval_cache_and_wait_for_operation(
1230 sleep_time = chip_op_time / 2; 1230 sleep_time = chip_op_time / 2;
1231 1231
1232 for (;;) { 1232 for (;;) {
1233 if (chip->state != chip_state) {
1234 /* Someone's suspended the operation: sleep */
1235 DECLARE_WAITQUEUE(wait, current);
1236 set_current_state(TASK_UNINTERRUPTIBLE);
1237 add_wait_queue(&chip->wq, &wait);
1238 mutex_unlock(&chip->mutex);
1239 schedule();
1240 remove_wait_queue(&chip->wq, &wait);
1241 mutex_lock(&chip->mutex);
1242 continue;
1243 }
1244
1233 status = map_read(map, cmd_adr); 1245 status = map_read(map, cmd_adr);
1234 if (map_word_andequal(map, status, status_OK, status_OK)) 1246 if (map_word_andequal(map, status, status_OK, status_OK))
1235 break; 1247 break;
1236 1248
1249 if (chip->erase_suspended && chip_state == FL_ERASING) {
1250 /* Erase suspend occured while sleep: reset timeout */
1251 timeo = reset_timeo;
1252 chip->erase_suspended = 0;
1253 }
1254 if (chip->write_suspended && chip_state == FL_WRITING) {
1255 /* Write suspend occured while sleep: reset timeout */
1256 timeo = reset_timeo;
1257 chip->write_suspended = 0;
1258 }
1237 if (!timeo) { 1259 if (!timeo) {
1238 map_write(map, CMD(0x70), cmd_adr); 1260 map_write(map, CMD(0x70), cmd_adr);
1239 chip->state = FL_STATUS; 1261 chip->state = FL_STATUS;
@@ -1257,27 +1279,6 @@ static int inval_cache_and_wait_for_operation(
1257 timeo--; 1279 timeo--;
1258 } 1280 }
1259 mutex_lock(&chip->mutex); 1281 mutex_lock(&chip->mutex);
1260
1261 while (chip->state != chip_state) {
1262 /* Someone's suspended the operation: sleep */
1263 DECLARE_WAITQUEUE(wait, current);
1264 set_current_state(TASK_UNINTERRUPTIBLE);
1265 add_wait_queue(&chip->wq, &wait);
1266 mutex_unlock(&chip->mutex);
1267 schedule();
1268 remove_wait_queue(&chip->wq, &wait);
1269 mutex_lock(&chip->mutex);
1270 }
1271 if (chip->erase_suspended && chip_state == FL_ERASING) {
1272 /* Erase suspend occured while sleep: reset timeout */
1273 timeo = reset_timeo;
1274 chip->erase_suspended = 0;
1275 }
1276 if (chip->write_suspended && chip_state == FL_WRITING) {
1277 /* Write suspend occured while sleep: reset timeout */
1278 timeo = reset_timeo;
1279 chip->write_suspended = 0;
1280 }
1281 } 1282 }
1282 1283
1283 /* Done and happy. */ 1284 /* Done and happy. */
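The cfi_cmdset_0001 change moves the "someone suspended the operation" wait to the top of the polling loop, so chip status is only sampled while the chip is in the expected state, and a suspend that happened while we slept restores the full timeout instead of eating into it. A toy model of the reordered loop (no real hardware or waitqueues, just the control flow):

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	const int reset_timeo = 4;
	int timeo = reset_timeo;
	int state = 0, expected_state = 0;   /* 0 = erasing/writing */
	bool suspend_seen = true;            /* pretend one suspend happens */
	int polls = 0;

	for (;;) {
		if (state != expected_state) {
			/* operation was suspended: wait, then re-check */
			state = expected_state;
			continue;
		}
		/* status read happens only while the chip is in our state */
		if (++polls == 3)            /* pretend status reads OK here */
			break;
		if (suspend_seen) {
			suspend_seen = false;
			state = 1;           /* simulate a suspend... */
			timeo = reset_timeo; /* ...which resets the timeout */
			continue;
		}
		if (!timeo--) {
			printf("timeout\n");
			return 1;
		}
	}
	printf("done after %d status polls\n", polls);
	return 0;
}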
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index d72a5fb2d041..4e1be51cc122 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -1935,14 +1935,14 @@ static void jedec_reset(u32 base, struct map_info *map, struct cfi_private *cfi)
1935} 1935}
1936 1936
1937 1937
1938static int cfi_jedec_setup(struct cfi_private *p_cfi, int index) 1938static int cfi_jedec_setup(struct map_info *map, struct cfi_private *cfi, int index)
1939{ 1939{
1940 int i,num_erase_regions; 1940 int i,num_erase_regions;
1941 uint8_t uaddr; 1941 uint8_t uaddr;
1942 1942
1943 if (! (jedec_table[index].devtypes & p_cfi->device_type)) { 1943 if (!(jedec_table[index].devtypes & cfi->device_type)) {
1944 DEBUG(MTD_DEBUG_LEVEL1, "Rejecting potential %s with incompatible %d-bit device type\n", 1944 DEBUG(MTD_DEBUG_LEVEL1, "Rejecting potential %s with incompatible %d-bit device type\n",
1945 jedec_table[index].name, 4 * (1<<p_cfi->device_type)); 1945 jedec_table[index].name, 4 * (1<<cfi->device_type));
1946 return 0; 1946 return 0;
1947 } 1947 }
1948 1948
@@ -1950,27 +1950,28 @@ static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
1950 1950
1951 num_erase_regions = jedec_table[index].nr_regions; 1951 num_erase_regions = jedec_table[index].nr_regions;
1952 1952
1953 p_cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL); 1953 cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL);
1954 if (!p_cfi->cfiq) { 1954 if (!cfi->cfiq) {
1955 //xx printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name); 1955 //xx printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name);
1956 return 0; 1956 return 0;
1957 } 1957 }
1958 1958
1959 memset(p_cfi->cfiq,0,sizeof(struct cfi_ident)); 1959 memset(cfi->cfiq, 0, sizeof(struct cfi_ident));
1960 1960
1961 p_cfi->cfiq->P_ID = jedec_table[index].cmd_set; 1961 cfi->cfiq->P_ID = jedec_table[index].cmd_set;
1962 p_cfi->cfiq->NumEraseRegions = jedec_table[index].nr_regions; 1962 cfi->cfiq->NumEraseRegions = jedec_table[index].nr_regions;
1963 p_cfi->cfiq->DevSize = jedec_table[index].dev_size; 1963 cfi->cfiq->DevSize = jedec_table[index].dev_size;
1964 p_cfi->cfi_mode = CFI_MODE_JEDEC; 1964 cfi->cfi_mode = CFI_MODE_JEDEC;
1965 cfi->sector_erase_cmd = CMD(0x30);
1965 1966
1966 for (i=0; i<num_erase_regions; i++){ 1967 for (i=0; i<num_erase_regions; i++){
1967 p_cfi->cfiq->EraseRegionInfo[i] = jedec_table[index].regions[i]; 1968 cfi->cfiq->EraseRegionInfo[i] = jedec_table[index].regions[i];
1968 } 1969 }
1969 p_cfi->cmdset_priv = NULL; 1970 cfi->cmdset_priv = NULL;
1970 1971
1971 /* This may be redundant for some cases, but it doesn't hurt */ 1972 /* This may be redundant for some cases, but it doesn't hurt */
1972 p_cfi->mfr = jedec_table[index].mfr_id; 1973 cfi->mfr = jedec_table[index].mfr_id;
1973 p_cfi->id = jedec_table[index].dev_id; 1974 cfi->id = jedec_table[index].dev_id;
1974 1975
1975 uaddr = jedec_table[index].uaddr; 1976 uaddr = jedec_table[index].uaddr;
1976 1977
@@ -1978,8 +1979,8 @@ static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
1978 our brains explode when we see the datasheets talking about address 1979 our brains explode when we see the datasheets talking about address
1979 lines numbered from A-1 to A18. The CFI table has unlock addresses 1980 lines numbered from A-1 to A18. The CFI table has unlock addresses
1980 in device-words according to the mode the device is connected in */ 1981 in device-words according to the mode the device is connected in */
1981 p_cfi->addr_unlock1 = unlock_addrs[uaddr].addr1 / p_cfi->device_type; 1982 cfi->addr_unlock1 = unlock_addrs[uaddr].addr1 / cfi->device_type;
1982 p_cfi->addr_unlock2 = unlock_addrs[uaddr].addr2 / p_cfi->device_type; 1983 cfi->addr_unlock2 = unlock_addrs[uaddr].addr2 / cfi->device_type;
1983 1984
1984 return 1; /* ok */ 1985 return 1; /* ok */
1985} 1986}
@@ -2175,7 +2176,7 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
2175 "MTD %s(): matched device 0x%x,0x%x unlock_addrs: 0x%.4x 0x%.4x\n", 2176 "MTD %s(): matched device 0x%x,0x%x unlock_addrs: 0x%.4x 0x%.4x\n",
2176 __func__, cfi->mfr, cfi->id, 2177 __func__, cfi->mfr, cfi->id,
2177 cfi->addr_unlock1, cfi->addr_unlock2 ); 2178 cfi->addr_unlock1, cfi->addr_unlock2 );
2178 if (!cfi_jedec_setup(cfi, i)) 2179 if (!cfi_jedec_setup(map, cfi, i))
2179 return 0; 2180 return 0;
2180 goto ok_out; 2181 goto ok_out;
2181 } 2182 }
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c
index 77d64ce19e9f..92de7e3a49a5 100644
--- a/drivers/mtd/maps/amd76xrom.c
+++ b/drivers/mtd/maps/amd76xrom.c
@@ -151,6 +151,7 @@ static int __devinit amd76xrom_init_one (struct pci_dev *pdev,
151 printk(KERN_ERR MOD_NAME 151 printk(KERN_ERR MOD_NAME
152 " %s(): Unable to register resource %pR - kernel bug?\n", 152 " %s(): Unable to register resource %pR - kernel bug?\n",
153 __func__, &window->rsrc); 153 __func__, &window->rsrc);
154 return -EBUSY;
154 } 155 }
155 156
156 157
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index cb20c67995d8..e0a2373bf0e2 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -413,7 +413,6 @@ error3:
413error2: 413error2:
414 list_del(&new->list); 414 list_del(&new->list);
415error1: 415error1:
416 kfree(new);
417 return ret; 416 return ret;
418} 417}
419 418
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 15682ec8530e..28af71c61834 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -968,6 +968,6 @@ static void __exit omap_nand_exit(void)
968module_init(omap_nand_init); 968module_init(omap_nand_init);
969module_exit(omap_nand_exit); 969module_exit(omap_nand_exit);
970 970
971MODULE_ALIAS(DRIVER_NAME); 971MODULE_ALIAS("platform:" DRIVER_NAME);
972MODULE_LICENSE("GPL"); 972MODULE_LICENSE("GPL");
973MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards"); 973MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
index e78914938c5c..ac08750748a3 100644
--- a/drivers/mtd/onenand/generic.c
+++ b/drivers/mtd/onenand/generic.c
@@ -131,7 +131,7 @@ static struct platform_driver generic_onenand_driver = {
131 .remove = __devexit_p(generic_onenand_remove), 131 .remove = __devexit_p(generic_onenand_remove),
132}; 132};
133 133
134MODULE_ALIAS(DRIVER_NAME); 134MODULE_ALIAS("platform:" DRIVER_NAME);
135 135
136static int __init generic_onenand_init(void) 136static int __init generic_onenand_init(void)
137{ 137{
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index ac31f461cc1c..c849cacf4b2f 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -860,7 +860,7 @@ static void __exit omap2_onenand_exit(void)
860module_init(omap2_onenand_init); 860module_init(omap2_onenand_init);
861module_exit(omap2_onenand_exit); 861module_exit(omap2_onenand_exit);
862 862
863MODULE_ALIAS(DRIVER_NAME); 863MODULE_ALIAS("platform:" DRIVER_NAME);
864MODULE_LICENSE("GPL"); 864MODULE_LICENSE("GPL");
865MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>"); 865MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
866MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3"); 866MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c
index 39214e512452..7ca0eded2561 100644
--- a/drivers/net/ariadne.c
+++ b/drivers/net/ariadne.c
@@ -425,11 +425,6 @@ static irqreturn_t ariadne_interrupt(int irq, void *data)
425 int csr0, boguscnt; 425 int csr0, boguscnt;
426 int handled = 0; 426 int handled = 0;
427 427
428 if (dev == NULL) {
429 printk(KERN_WARNING "ariadne_interrupt(): irq for unknown device.\n");
430 return IRQ_NONE;
431 }
432
433 lance->RAP = CSR0; /* PCnet-ISA Controller Status */ 428 lance->RAP = CSR0; /* PCnet-ISA Controller Status */
434 429
435 if (!(lance->RDP & INTR)) /* Check if any interrupt has been */ 430 if (!(lance->RDP & INTR)) /* Check if any interrupt has been */
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 653c62475cb6..8849699c66c4 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -22,7 +22,7 @@
22 * (you will need to reboot afterwards) */ 22 * (you will need to reboot afterwards) */
23/* #define BNX2X_STOP_ON_ERROR */ 23/* #define BNX2X_STOP_ON_ERROR */
24 24
25#define DRV_MODULE_VERSION "1.62.00-5" 25#define DRV_MODULE_VERSION "1.62.00-6"
26#define DRV_MODULE_RELDATE "2011/01/30" 26#define DRV_MODULE_RELDATE "2011/01/30"
27#define BNX2X_BC_VER 0x040200 27#define BNX2X_BC_VER 0x040200
28 28
@@ -1211,6 +1211,7 @@ struct bnx2x {
1211 /* DCBX Negotation results */ 1211 /* DCBX Negotation results */
1212 struct dcbx_features dcbx_local_feat; 1212 struct dcbx_features dcbx_local_feat;
1213 u32 dcbx_error; 1213 u32 dcbx_error;
1214 u32 pending_max;
1214}; 1215};
1215 1216
1216/** 1217/**
@@ -1613,19 +1614,23 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1613#define BNX2X_BTR 4 1614#define BNX2X_BTR 4
1614#define MAX_SPQ_PENDING 8 1615#define MAX_SPQ_PENDING 8
1615 1616
1616 1617/* CMNG constants, as derived from system spec calculations */
1617/* CMNG constants 1618/* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */
1618 derived from lab experiments, and not from system spec calculations !!! */ 1619#define DEF_MIN_RATE 100
1619#define DEF_MIN_RATE 100 1620/* resolution of the rate shaping timer - 400 usec */
1620/* resolution of the rate shaping timer - 100 usec */ 1621#define RS_PERIODIC_TIMEOUT_USEC 400
1621#define RS_PERIODIC_TIMEOUT_USEC 100
1622/* resolution of fairness algorithm in usecs -
1623 coefficient for calculating the actual t fair */
1624#define T_FAIR_COEF 10000000
1625/* number of bytes in single QM arbitration cycle - 1622/* number of bytes in single QM arbitration cycle -
1626 coefficient for calculating the fairness timer */ 1623 * coefficient for calculating the fairness timer */
1627#define QM_ARB_BYTES 40000 1624#define QM_ARB_BYTES 160000
1628#define FAIR_MEM 2 1625/* resolution of Min algorithm 1:100 */
1626#define MIN_RES 100
1627/* how many bytes above threshold for the minimal credit of Min algorithm*/
1628#define MIN_ABOVE_THRESH 32768
1629/* Fairness algorithm integration time coefficient -
1630 * for calculating the actual Tfair */
1631#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES)
1632/* Memory of fairness algorithm . 2 cycles */
1633#define FAIR_MEM 2
1629 1634
1630 1635
1631#define ATTN_NIG_FOR_FUNC (1L << 8) 1636#define ATTN_NIG_FOR_FUNC (1L << 8)
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 710ce5d04c53..a71b32940533 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -259,10 +259,44 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
259#endif 259#endif
260} 260}
261 261
262/* Timestamp option length allowed for TPA aggregation:
263 *
264 * nop nop kind length echo val
265 */
266#define TPA_TSTAMP_OPT_LEN 12
267/**
 268 * Calculate the approximate value of the MSS for this
 269 * aggregation using its first packet.
270 *
271 * @param bp
272 * @param parsing_flags Parsing flags from the START CQE
273 * @param len_on_bd Total length of the first packet for the
274 * aggregation.
275 */
276static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
277 u16 len_on_bd)
278{
 279 /* A TPA aggregation won't have IP options, or TCP options
 280 * other than the timestamp.
281 */
282 u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr);
283
284
 285 /* Check if there was a TCP timestamp; if there is, it will
 286 * always be 12 bytes long: nop nop kind length echo val.
287 *
288 * Otherwise FW would close the aggregation.
289 */
290 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
291 hdrs_len += TPA_TSTAMP_OPT_LEN;
292
293 return len_on_bd - hdrs_len;
294}
295
262static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, 296static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
263 struct sk_buff *skb, 297 struct sk_buff *skb,
264 struct eth_fast_path_rx_cqe *fp_cqe, 298 struct eth_fast_path_rx_cqe *fp_cqe,
265 u16 cqe_idx) 299 u16 cqe_idx, u16 parsing_flags)
266{ 300{
267 struct sw_rx_page *rx_pg, old_rx_pg; 301 struct sw_rx_page *rx_pg, old_rx_pg;
268 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd); 302 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
@@ -275,8 +309,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
275 309
276 /* This is needed in order to enable forwarding support */ 310 /* This is needed in order to enable forwarding support */
277 if (frag_size) 311 if (frag_size)
278 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE, 312 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags,
279 max(frag_size, (u32)len_on_bd)); 313 len_on_bd);
280 314
281#ifdef BNX2X_STOP_ON_ERROR 315#ifdef BNX2X_STOP_ON_ERROR
282 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) { 316 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
@@ -344,6 +378,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
344 if (likely(new_skb)) { 378 if (likely(new_skb)) {
345 /* fix ip xsum and give it to the stack */ 379 /* fix ip xsum and give it to the stack */
346 /* (no need to map the new skb) */ 380 /* (no need to map the new skb) */
381 u16 parsing_flags =
382 le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags);
347 383
348 prefetch(skb); 384 prefetch(skb);
349 prefetch(((char *)(skb)) + L1_CACHE_BYTES); 385 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
@@ -373,9 +409,9 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
373 } 409 }
374 410
375 if (!bnx2x_fill_frag_skb(bp, fp, skb, 411 if (!bnx2x_fill_frag_skb(bp, fp, skb,
376 &cqe->fast_path_cqe, cqe_idx)) { 412 &cqe->fast_path_cqe, cqe_idx,
377 if ((le16_to_cpu(cqe->fast_path_cqe. 413 parsing_flags)) {
378 pars_flags.flags) & PARSING_FLAGS_VLAN)) 414 if (parsing_flags & PARSING_FLAGS_VLAN)
379 __vlan_hwaccel_put_tag(skb, 415 __vlan_hwaccel_put_tag(skb,
380 le16_to_cpu(cqe->fast_path_cqe. 416 le16_to_cpu(cqe->fast_path_cqe.
381 vlan_tag)); 417 vlan_tag));
@@ -703,19 +739,20 @@ u16 bnx2x_get_mf_speed(struct bnx2x *bp)
703{ 739{
704 u16 line_speed = bp->link_vars.line_speed; 740 u16 line_speed = bp->link_vars.line_speed;
705 if (IS_MF(bp)) { 741 if (IS_MF(bp)) {
706 u16 maxCfg = (bp->mf_config[BP_VN(bp)] & 742 u16 maxCfg = bnx2x_extract_max_cfg(bp,
707 FUNC_MF_CFG_MAX_BW_MASK) >> 743 bp->mf_config[BP_VN(bp)]);
708 FUNC_MF_CFG_MAX_BW_SHIFT; 744
709 /* Calculate the current MAX line speed limit for the DCC 745 /* Calculate the current MAX line speed limit for the MF
710 * capable devices 746 * devices
711 */ 747 */
712 if (IS_MF_SD(bp)) { 748 if (IS_MF_SI(bp))
749 line_speed = (line_speed * maxCfg) / 100;
750 else { /* SD mode */
713 u16 vn_max_rate = maxCfg * 100; 751 u16 vn_max_rate = maxCfg * 100;
714 752
715 if (vn_max_rate < line_speed) 753 if (vn_max_rate < line_speed)
716 line_speed = vn_max_rate; 754 line_speed = vn_max_rate;
717 } else /* IS_MF_SI(bp)) */ 755 }
718 line_speed = (line_speed * maxCfg) / 100;
719 } 756 }
720 757
721 return line_speed; 758 return line_speed;
@@ -959,6 +996,23 @@ void bnx2x_free_skbs(struct bnx2x *bp)
959 bnx2x_free_rx_skbs(bp); 996 bnx2x_free_rx_skbs(bp);
960} 997}
961 998
999void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1000{
1001 /* load old values */
1002 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1003
1004 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1005 /* leave all but MAX value */
1006 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1007
1008 /* set new MAX value */
1009 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1010 & FUNC_MF_CFG_MAX_BW_MASK;
1011
1012 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1013 }
1014}
1015
962static void bnx2x_free_msix_irqs(struct bnx2x *bp) 1016static void bnx2x_free_msix_irqs(struct bnx2x *bp)
963{ 1017{
964 int i, offset = 1; 1018 int i, offset = 1;
@@ -1427,6 +1481,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1427 1481
1428 bnx2x_set_eth_mac(bp, 1); 1482 bnx2x_set_eth_mac(bp, 1);
1429 1483
1484 if (bp->pending_max) {
1485 bnx2x_update_max_mf_config(bp, bp->pending_max);
1486 bp->pending_max = 0;
1487 }
1488
1430 if (bp->port.pmf) 1489 if (bp->port.pmf)
1431 bnx2x_initial_phy_init(bp, load_mode); 1490 bnx2x_initial_phy_init(bp, load_mode);
1432 1491
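bnx2x_set_lro_mss() above derives the MSS of a TPA aggregation from its first packet: fixed Ethernet + IPv4 + TCP header lengths, plus 12 bytes when the timestamp option is present, subtracted from len_on_bd. A worked sketch of that arithmetic, with the header sizes written out as constants and a made-up flag bit instead of the firmware parsing flags:

#include <stdio.h>
#include <stdint.h>

#define ETH_HDR_LEN    14
#define IPV4_HDR_LEN   20  /* no IP options expected in a TPA aggregation */
#define TCP_HDR_LEN    20
#define TSTAMP_OPT_LEN 12  /* nop nop kind length echo val */

#define FLAG_TIMESTAMP 0x1 /* hypothetical parsing flag bit */

static uint16_t lro_mss(uint16_t parsing_flags, uint16_t len_on_bd)
{
	uint16_t hdrs_len = ETH_HDR_LEN + IPV4_HDR_LEN + TCP_HDR_LEN;

	if (parsing_flags & FLAG_TIMESTAMP)
		hdrs_len += TSTAMP_OPT_LEN;

	return len_on_bd - hdrs_len;
}

int main(void)
{
	/* 1514-byte frame with timestamps: 1514 - (14 + 20 + 20 + 12) = 1448 */
	printf("mss = %u\n", lro_mss(FLAG_TIMESTAMP, 1514));
	return 0;
}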
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index 03eb4d68e6bb..85ea7f26b51f 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -341,6 +341,15 @@ void bnx2x_dcbx_init(struct bnx2x *bp);
341 */ 341 */
342int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state); 342int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
343 343
344/**
345 * Updates MAX part of MF configuration in HW
346 * (if required)
347 *
348 * @param bp
349 * @param value
350 */
351void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);
352
344/* dev_close main block */ 353/* dev_close main block */
345int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode); 354int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
346 355
@@ -1044,4 +1053,24 @@ static inline void storm_memset_cmng(struct bnx2x *bp,
1044void bnx2x_acquire_phy_lock(struct bnx2x *bp); 1053void bnx2x_acquire_phy_lock(struct bnx2x *bp);
1045void bnx2x_release_phy_lock(struct bnx2x *bp); 1054void bnx2x_release_phy_lock(struct bnx2x *bp);
1046 1055
1056/**
1057 * Extracts MAX BW part from MF configuration.
1058 *
1059 * @param bp
1060 * @param mf_cfg
1061 *
1062 * @return u16
1063 */
1064static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
1065{
1066 u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1067 FUNC_MF_CFG_MAX_BW_SHIFT;
1068 if (!max_cfg) {
1069 BNX2X_ERR("Illegal configuration detected for Max BW - "
1070 "using 100 instead\n");
1071 max_cfg = 100;
1072 }
1073 return max_cfg;
1074}
1075
1047#endif /* BNX2X_CMN_H */ 1076#endif /* BNX2X_CMN_H */
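bnx2x_extract_max_cfg() shifts the MAX BW field out of the MF configuration word and falls back to 100 when the field is zero. A standalone sketch with made-up mask and shift values, just to show the extraction and the fallback:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical layout: MAX BW in bits 16..31 of the MF config word. */
#define MAX_BW_MASK  0xffff0000
#define MAX_BW_SHIFT 16

static uint16_t extract_max_cfg(uint32_t mf_cfg)
{
	uint16_t max_cfg = (mf_cfg & MAX_BW_MASK) >> MAX_BW_SHIFT;

	if (!max_cfg) {
		/* zero is an illegal configuration: treat it as 100 (no limit) */
		max_cfg = 100;
	}
	return max_cfg;
}

int main(void)
{
	printf("%u\n", extract_max_cfg(50u << MAX_BW_SHIFT)); /* 50  */
	printf("%u\n", extract_max_cfg(0));                   /* 100 */
	return 0;
}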
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index 5b44a8b48509..7e92f9d0dcfd 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -238,7 +238,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
238 speed |= (cmd->speed_hi << 16); 238 speed |= (cmd->speed_hi << 16);
239 239
240 if (IS_MF_SI(bp)) { 240 if (IS_MF_SI(bp)) {
241 u32 param = 0; 241 u32 part;
242 u32 line_speed = bp->link_vars.line_speed; 242 u32 line_speed = bp->link_vars.line_speed;
243 243
244 /* use 10G if no link detected */ 244 /* use 10G if no link detected */
@@ -251,23 +251,22 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
251 REQ_BC_VER_4_SET_MF_BW); 251 REQ_BC_VER_4_SET_MF_BW);
252 return -EINVAL; 252 return -EINVAL;
253 } 253 }
254 if (line_speed < speed) { 254
255 BNX2X_DEV_INFO("New speed should be less or equal " 255 part = (speed * 100) / line_speed;
256 "to actual line speed\n"); 256
257 if (line_speed < speed || !part) {
258 BNX2X_DEV_INFO("Speed setting should be in a range "
259 "from 1%% to 100%% "
260 "of actual line speed\n");
257 return -EINVAL; 261 return -EINVAL;
258 } 262 }
259 /* load old values */
260 param = bp->mf_config[BP_VN(bp)];
261
262 /* leave only MIN value */
263 param &= FUNC_MF_CFG_MIN_BW_MASK;
264 263
265 /* set new MAX value */ 264 if (bp->state != BNX2X_STATE_OPEN)
266 param |= (((speed * 100) / line_speed) 265 /* store value for following "load" */
267 << FUNC_MF_CFG_MAX_BW_SHIFT) 266 bp->pending_max = part;
268 & FUNC_MF_CFG_MAX_BW_MASK; 267 else
268 bnx2x_update_max_mf_config(bp, part);
269 269
270 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, param);
271 return 0; 270 return 0;
272 } 271 }
273 272
@@ -1781,9 +1780,7 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
1781 { 0x100, 0x350 }, /* manuf_info */ 1780 { 0x100, 0x350 }, /* manuf_info */
1782 { 0x450, 0xf0 }, /* feature_info */ 1781 { 0x450, 0xf0 }, /* feature_info */
1783 { 0x640, 0x64 }, /* upgrade_key_info */ 1782 { 0x640, 0x64 }, /* upgrade_key_info */
1784 { 0x6a4, 0x64 },
1785 { 0x708, 0x70 }, /* manuf_key_info */ 1783 { 0x708, 0x70 }, /* manuf_key_info */
1786 { 0x778, 0x70 },
1787 { 0, 0 } 1784 { 0, 0 }
1788 }; 1785 };
1789 __be32 buf[0x350 / 4]; 1786 __be32 buf[0x350 / 4];
@@ -1933,11 +1930,11 @@ static void bnx2x_self_test(struct net_device *dev,
1933 buf[4] = 1; 1930 buf[4] = 1;
1934 etest->flags |= ETH_TEST_FL_FAILED; 1931 etest->flags |= ETH_TEST_FL_FAILED;
1935 } 1932 }
1936 if (bp->port.pmf) 1933
1937 if (bnx2x_link_test(bp, is_serdes) != 0) { 1934 if (bnx2x_link_test(bp, is_serdes) != 0) {
1938 buf[5] = 1; 1935 buf[5] = 1;
1939 etest->flags |= ETH_TEST_FL_FAILED; 1936 etest->flags |= ETH_TEST_FL_FAILED;
1940 } 1937 }
1941 1938
1942#ifdef BNX2X_EXTRA_DEBUG 1939#ifdef BNX2X_EXTRA_DEBUG
1943 bnx2x_panic_dump(bp); 1940 bnx2x_panic_dump(bp);
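The ethtool hunk above converts the requested speed into a percentage of the actual line speed ("part"), rejects anything outside the 1%..100% range, and either applies it immediately or parks it in pending_max until the next load. A short sketch of the validation and percentage calculation:

#include <stdio.h>

/* Return the requested speed as a percentage of line_speed,
 * or -1 when it falls outside the 1%..100% range. */
static int speed_to_percent(unsigned speed, unsigned line_speed)
{
	unsigned part = (speed * 100) / line_speed;

	if (speed > line_speed || part == 0)
		return -1;
	return (int)part;
}

int main(void)
{
	printf("%d\n", speed_to_percent(2500, 10000));  /* 25 */
	printf("%d\n", speed_to_percent(50, 10000));    /* below 1%: -1 */
	printf("%d\n", speed_to_percent(20000, 10000)); /* above line speed: -1 */
	return 0;
}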
diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h
index 5a268e9a0895..fa6dbe3f2058 100644
--- a/drivers/net/bnx2x/bnx2x_init.h
+++ b/drivers/net/bnx2x/bnx2x_init.h
@@ -241,7 +241,7 @@ static const struct {
241 /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't 241 /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't
242 * want to handle "system kill" flow at the moment. 242 * want to handle "system kill" flow at the moment.
243 */ 243 */
244 BLOCK_PRTY_INFO(PXP, 0x3ffffff, 0x3ffffff, 0x3ffffff, 0x3ffffff), 244 BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff),
245 BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff), 245 BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff),
246 BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff), 246 BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff),
247 BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0), 247 BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0),
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index d584d32c747d..aa032339e321 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -1974,13 +1974,22 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
1974 vn_max_rate = 0; 1974 vn_max_rate = 0;
1975 1975
1976 } else { 1976 } else {
1977 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
1978
1977 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 1979 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1978 FUNC_MF_CFG_MIN_BW_SHIFT) * 100; 1980 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1979 /* If min rate is zero - set it to 1 */ 1981 /* If fairness is enabled (not all min rates are zeroes) and
1982 if current min rate is zero - set it to 1.
1983 This is a requirement of the algorithm. */
1980 if (bp->vn_weight_sum && (vn_min_rate == 0)) 1984 if (bp->vn_weight_sum && (vn_min_rate == 0))
1981 vn_min_rate = DEF_MIN_RATE; 1985 vn_min_rate = DEF_MIN_RATE;
1982 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> 1986
1983 FUNC_MF_CFG_MAX_BW_SHIFT) * 100; 1987 if (IS_MF_SI(bp))
1988 /* maxCfg in percents of linkspeed */
1989 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
1990 else
1991 /* maxCfg is absolute in 100Mb units */
1992 vn_max_rate = maxCfg * 100;
1984 } 1993 }
1985 1994
1986 DP(NETIF_MSG_IFUP, 1995 DP(NETIF_MSG_IFUP,
@@ -2006,7 +2015,8 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
2006 m_fair_vn.vn_credit_delta = 2015 m_fair_vn.vn_credit_delta =
2007 max_t(u32, (vn_min_rate * (T_FAIR_COEF / 2016 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
2008 (8 * bp->vn_weight_sum))), 2017 (8 * bp->vn_weight_sum))),
2009 (bp->cmng.fair_vars.fair_threshold * 2)); 2018 (bp->cmng.fair_vars.fair_threshold +
2019 MIN_ABOVE_THRESH));
2010 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n", 2020 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
2011 m_fair_vn.vn_credit_delta); 2021 m_fair_vn.vn_credit_delta);
2012 } 2022 }
@@ -2082,8 +2092,9 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2082 bnx2x_calc_vn_weight_sum(bp); 2092 bnx2x_calc_vn_weight_sum(bp);
2083 2093
2084 /* calculate and set min-max rate for each vn */ 2094 /* calculate and set min-max rate for each vn */
2085 for (vn = VN_0; vn < E1HVN_MAX; vn++) 2095 if (bp->port.pmf)
2086 bnx2x_init_vn_minmax(bp, vn); 2096 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2097 bnx2x_init_vn_minmax(bp, vn);
2087 2098
2088 /* always enable rate shaping and fairness */ 2099 /* always enable rate shaping and fairness */
2089 bp->cmng.flags.cmng_enables |= 2100 bp->cmng.flags.cmng_enables |=
@@ -2152,13 +2163,6 @@ static void bnx2x_link_attn(struct bnx2x *bp)
2152 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); 2163 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2153 } 2164 }
2154 2165
2155 /* indicate link status only if link status actually changed */
2156 if (prev_link_status != bp->link_vars.link_status)
2157 bnx2x_link_report(bp);
2158
2159 if (IS_MF(bp))
2160 bnx2x_link_sync_notify(bp);
2161
2162 if (bp->link_vars.link_up && bp->link_vars.line_speed) { 2166 if (bp->link_vars.link_up && bp->link_vars.line_speed) {
2163 int cmng_fns = bnx2x_get_cmng_fns_mode(bp); 2167 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2164 2168
@@ -2170,6 +2174,13 @@ static void bnx2x_link_attn(struct bnx2x *bp)
2170 DP(NETIF_MSG_IFUP, 2174 DP(NETIF_MSG_IFUP,
2171 "single function mode without fairness\n"); 2175 "single function mode without fairness\n");
2172 } 2176 }
2177
2178 if (IS_MF(bp))
2179 bnx2x_link_sync_notify(bp);
2180
2181 /* indicate link status only if link status actually changed */
2182 if (prev_link_status != bp->link_vars.link_status)
2183 bnx2x_link_report(bp);
2173} 2184}
2174 2185
2175void bnx2x__link_status_update(struct bnx2x *bp) 2186void bnx2x__link_status_update(struct bnx2x *bp)
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index bda60d590fa8..3445ded6674f 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -1239,14 +1239,14 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
1239 if (unlikely(bp->panic)) 1239 if (unlikely(bp->panic))
1240 return; 1240 return;
1241 1241
1242 bnx2x_stats_stm[bp->stats_state][event].action(bp);
1243
1242 /* Protect a state change flow */ 1244 /* Protect a state change flow */
1243 spin_lock_bh(&bp->stats_lock); 1245 spin_lock_bh(&bp->stats_lock);
1244 state = bp->stats_state; 1246 state = bp->stats_state;
1245 bp->stats_state = bnx2x_stats_stm[state][event].next_state; 1247 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
1246 spin_unlock_bh(&bp->stats_lock); 1248 spin_unlock_bh(&bp->stats_lock);
1247 1249
1248 bnx2x_stats_stm[state][event].action(bp);
1249
1250 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) 1250 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
1251 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", 1251 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
1252 state, event, bp->stats_state); 1252 state, event, bp->stats_state);
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 1024ae158227..a5d5d0b5b155 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -281,23 +281,23 @@ static inline int __check_agg_selection_timer(struct port *port)
281} 281}
282 282
283/** 283/**
284 * __get_rx_machine_lock - lock the port's RX machine 284 * __get_state_machine_lock - lock the port's state machines
285 * @port: the port we're looking at 285 * @port: the port we're looking at
286 * 286 *
287 */ 287 */
288static inline void __get_rx_machine_lock(struct port *port) 288static inline void __get_state_machine_lock(struct port *port)
289{ 289{
290 spin_lock_bh(&(SLAVE_AD_INFO(port->slave).rx_machine_lock)); 290 spin_lock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock));
291} 291}
292 292
293/** 293/**
294 * __release_rx_machine_lock - unlock the port's RX machine 294 * __release_state_machine_lock - unlock the port's state machines
295 * @port: the port we're looking at 295 * @port: the port we're looking at
296 * 296 *
297 */ 297 */
298static inline void __release_rx_machine_lock(struct port *port) 298static inline void __release_state_machine_lock(struct port *port)
299{ 299{
300 spin_unlock_bh(&(SLAVE_AD_INFO(port->slave).rx_machine_lock)); 300 spin_unlock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock));
301} 301}
302 302
303/** 303/**
@@ -388,14 +388,14 @@ static u8 __get_duplex(struct port *port)
388} 388}
389 389
390/** 390/**
391 * __initialize_port_locks - initialize a port's RX machine spinlock 391 * __initialize_port_locks - initialize a port's STATE machine spinlock
392 * @port: the port we're looking at 392 * @port: the port we're looking at
393 * 393 *
394 */ 394 */
395static inline void __initialize_port_locks(struct port *port) 395static inline void __initialize_port_locks(struct port *port)
396{ 396{
397 // make sure it isn't called twice 397 // make sure it isn't called twice
398 spin_lock_init(&(SLAVE_AD_INFO(port->slave).rx_machine_lock)); 398 spin_lock_init(&(SLAVE_AD_INFO(port->slave).state_machine_lock));
399} 399}
400 400
401//conversions 401//conversions
@@ -1025,9 +1025,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
1025{ 1025{
1026 rx_states_t last_state; 1026 rx_states_t last_state;
1027 1027
1028 // Lock to prevent 2 instances of this function to run simultaneously(rx interrupt and periodic machine callback)
1029 __get_rx_machine_lock(port);
1030
1031 // keep current State Machine state to compare later if it was changed 1028 // keep current State Machine state to compare later if it was changed
1032 last_state = port->sm_rx_state; 1029 last_state = port->sm_rx_state;
1033 1030
@@ -1133,7 +1130,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
1133 pr_err("%s: An illegal loopback occurred on adapter (%s).\n" 1130 pr_err("%s: An illegal loopback occurred on adapter (%s).\n"
1134 "Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n", 1131 "Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n",
1135 port->slave->dev->master->name, port->slave->dev->name); 1132 port->slave->dev->master->name, port->slave->dev->name);
1136 __release_rx_machine_lock(port);
1137 return; 1133 return;
1138 } 1134 }
1139 __update_selected(lacpdu, port); 1135 __update_selected(lacpdu, port);
@@ -1153,7 +1149,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
1153 break; 1149 break;
1154 } 1150 }
1155 } 1151 }
1156 __release_rx_machine_lock(port);
1157} 1152}
1158 1153
1159/** 1154/**
@@ -2155,6 +2150,12 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
2155 goto re_arm; 2150 goto re_arm;
2156 } 2151 }
2157 2152
2153 /* Lock around state machines to protect data accessed
2154 * by all (e.g., port->sm_vars). ad_rx_machine may run
2155 * concurrently due to incoming LACPDU.
2156 */
2157 __get_state_machine_lock(port);
2158
2158 ad_rx_machine(NULL, port); 2159 ad_rx_machine(NULL, port);
2159 ad_periodic_machine(port); 2160 ad_periodic_machine(port);
2160 ad_port_selection_logic(port); 2161 ad_port_selection_logic(port);
@@ -2164,6 +2165,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
2164 // turn off the BEGIN bit, since we already handled it 2165 // turn off the BEGIN bit, since we already handled it
2165 if (port->sm_vars & AD_PORT_BEGIN) 2166 if (port->sm_vars & AD_PORT_BEGIN)
2166 port->sm_vars &= ~AD_PORT_BEGIN; 2167 port->sm_vars &= ~AD_PORT_BEGIN;
2168
2169 __release_state_machine_lock(port);
2167 } 2170 }
2168 2171
2169re_arm: 2172re_arm:
@@ -2200,7 +2203,10 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u
2200 case AD_TYPE_LACPDU: 2203 case AD_TYPE_LACPDU:
2201 pr_debug("Received LACPDU on port %d\n", 2204 pr_debug("Received LACPDU on port %d\n",
2202 port->actor_port_number); 2205 port->actor_port_number);
2206 /* Protect against concurrent state machines */
2207 __get_state_machine_lock(port);
2203 ad_rx_machine(lacpdu, port); 2208 ad_rx_machine(lacpdu, port);
2209 __release_state_machine_lock(port);
2204 break; 2210 break;
2205 2211
2206 case AD_TYPE_MARKER: 2212 case AD_TYPE_MARKER:
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index 2c46a154f2c6..b28baff70864 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -264,7 +264,8 @@ struct ad_bond_info {
264struct ad_slave_info { 264struct ad_slave_info {
265 struct aggregator aggregator; // 802.3ad aggregator structure 265 struct aggregator aggregator; // 802.3ad aggregator structure
266 struct port port; // 802.3ad port structure 266 struct port port; // 802.3ad port structure
267 spinlock_t rx_machine_lock; // To avoid race condition between callback and receive interrupt 267 spinlock_t state_machine_lock; /* mutex state machines vs.
268 incoming LACPDU */
268 u16 id; 269 u16 id;
269}; 270};
270 271
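
The bonding change renames the per-port rx_machine_lock to state_machine_lock and takes it around the whole periodic handler as well as around ad_rx_machine() on LACPDU receive, instead of locking inside the RX machine only. A fragment showing the resulting locking shape, with placeholder names and not compiled standalone:

#include <linux/spinlock.h>

struct my_port {
	spinlock_t sm_lock;		/* serializes all per-port state machines */
	unsigned long sm_vars;
};

static void my_periodic_machine(struct my_port *port)
{
	spin_lock_bh(&port->sm_lock);
	/* rx / periodic / selection / mux / tx machines run here,
	 * reading and writing port->sm_vars without racing the RX path */
	spin_unlock_bh(&port->sm_lock);
}

static void my_lacpdu_rx(struct my_port *port, const void *lacpdu)
{
	spin_lock_bh(&port->sm_lock);
	/* feed the received LACPDU to the RX machine */
	spin_unlock_bh(&port->sm_lock);
}
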
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 5157e15e96eb..aeea9f9ff6e8 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -633,6 +633,7 @@ static const struct net_device_ops softing_netdev_ops = {
633}; 633};
634 634
635static const struct can_bittiming_const softing_btr_const = { 635static const struct can_bittiming_const softing_btr_const = {
636 .name = "softing",
636 .tseg1_min = 1, 637 .tseg1_min = 1,
637 .tseg1_max = 16, 638 .tseg1_max = 16,
638 .tseg2_min = 1, 639 .tseg2_min = 1,
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 7ff170cbc7dc..302be4aa69d6 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -2760,6 +2760,8 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
2760 u32 status_idx = (u16) *cp->kcq1.status_idx_ptr; 2760 u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2761 int kcqe_cnt; 2761 int kcqe_cnt;
2762 2762
2763 /* status block index must be read before reading other fields */
2764 rmb();
2763 cp->kwq_con_idx = *cp->kwq_con_idx_ptr; 2765 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2764 2766
2765 while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) { 2767 while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
@@ -2770,6 +2772,8 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
2770 barrier(); 2772 barrier();
2771 if (status_idx != *cp->kcq1.status_idx_ptr) { 2773 if (status_idx != *cp->kcq1.status_idx_ptr) {
2772 status_idx = (u16) *cp->kcq1.status_idx_ptr; 2774 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2775 /* status block index must be read first */
2776 rmb();
2773 cp->kwq_con_idx = *cp->kwq_con_idx_ptr; 2777 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2774 } else 2778 } else
2775 break; 2779 break;
@@ -2888,6 +2892,8 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
2888 u32 last_status = *info->status_idx_ptr; 2892 u32 last_status = *info->status_idx_ptr;
2889 int kcqe_cnt; 2893 int kcqe_cnt;
2890 2894
2895 /* status block index must be read before reading the KCQ */
2896 rmb();
2891 while ((kcqe_cnt = cnic_get_kcqes(dev, info))) { 2897 while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
2892 2898
2893 service_kcqes(dev, kcqe_cnt); 2899 service_kcqes(dev, kcqe_cnt);
@@ -2898,6 +2904,8 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
2898 break; 2904 break;
2899 2905
2900 last_status = *info->status_idx_ptr; 2906 last_status = *info->status_idx_ptr;
2907 /* status block index must be read before reading the KCQ */
2908 rmb();
2901 } 2909 }
2902 return last_status; 2910 return last_status;
2903} 2911}
@@ -2906,26 +2914,35 @@ static void cnic_service_bnx2x_bh(unsigned long data)
2906{ 2914{
2907 struct cnic_dev *dev = (struct cnic_dev *) data; 2915 struct cnic_dev *dev = (struct cnic_dev *) data;
2908 struct cnic_local *cp = dev->cnic_priv; 2916 struct cnic_local *cp = dev->cnic_priv;
2909 u32 status_idx; 2917 u32 status_idx, new_status_idx;
2910 2918
2911 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) 2919 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
2912 return; 2920 return;
2913 2921
2914 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1); 2922 while (1) {
2923 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
2915 2924
2916 CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); 2925 CNIC_WR16(dev, cp->kcq1.io_addr,
2926 cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
2917 2927
2918 if (BNX2X_CHIP_IS_E2(cp->chip_id)) { 2928 if (!BNX2X_CHIP_IS_E2(cp->chip_id)) {
2919 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2); 2929 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
2930 status_idx, IGU_INT_ENABLE, 1);
2931 break;
2932 }
2933
2934 new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
2935
2936 if (new_status_idx != status_idx)
2937 continue;
2920 2938
2921 CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx + 2939 CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
2922 MAX_KCQ_IDX); 2940 MAX_KCQ_IDX);
2923 2941
2924 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 2942 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
2925 status_idx, IGU_INT_ENABLE, 1); 2943 status_idx, IGU_INT_ENABLE, 1);
2926 } else { 2944
2927 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID, 2945 break;
2928 status_idx, IGU_INT_ENABLE, 1);
2929 } 2946 }
2930} 2947}
2931 2948
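
The cnic hunks insert rmb() so the status-block index is read before any queue entry it covers, and rework the E2 service path into a loop that re-reads kcq2 until the index stops moving before acking the IGU. A fragment of the barrier ordering only, with placeholder names and not compiled standalone:

static u32 my_service_queue(struct my_queue *q)
{
	u32 idx = *q->status_idx_ptr;	/* producer index from the status block */

	/* Order the index read before reading the entries it claims are
	 * valid; on a weakly ordered CPU the entry reads could otherwise
	 * be satisfied with stale data. */
	rmb();

	my_consume_up_to(q, idx);
	return idx;
}
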
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 2a628d17d178..7018bfe408a4 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -1008,7 +1008,7 @@ static void emac_rx_handler(void *token, int len, int status)
1008 int ret; 1008 int ret;
1009 1009
1010 /* free and bail if we are shutting down */ 1010 /* free and bail if we are shutting down */
1011 if (unlikely(!netif_running(ndev))) { 1011 if (unlikely(!netif_running(ndev) || !netif_carrier_ok(ndev))) {
1012 dev_kfree_skb_any(skb); 1012 dev_kfree_skb_any(skb);
1013 return; 1013 return;
1014 } 1014 }
diff --git a/drivers/net/dnet.c b/drivers/net/dnet.c
index 9d8a20b72fa9..8318ea06cb6d 100644
--- a/drivers/net/dnet.c
+++ b/drivers/net/dnet.c
@@ -337,8 +337,6 @@ static int dnet_mii_init(struct dnet *bp)
337 for (i = 0; i < PHY_MAX_ADDR; i++) 337 for (i = 0; i < PHY_MAX_ADDR; i++)
338 bp->mii_bus->irq[i] = PHY_POLL; 338 bp->mii_bus->irq[i] = PHY_POLL;
339 339
340 platform_set_drvdata(bp->dev, bp->mii_bus);
341
342 if (mdiobus_register(bp->mii_bus)) { 340 if (mdiobus_register(bp->mii_bus)) {
343 err = -ENXIO; 341 err = -ENXIO;
344 goto err_out_free_mdio_irq; 342 goto err_out_free_mdio_irq;
@@ -863,6 +861,7 @@ static int __devinit dnet_probe(struct platform_device *pdev)
863 bp = netdev_priv(dev); 861 bp = netdev_priv(dev);
864 bp->dev = dev; 862 bp->dev = dev;
865 863
864 platform_set_drvdata(pdev, dev);
866 SET_NETDEV_DEV(dev, &pdev->dev); 865 SET_NETDEV_DEV(dev, &pdev->dev);
867 866
868 spin_lock_init(&bp->lock); 867 spin_lock_init(&bp->lock);
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index 55c1711f1688..33e7c45a4fe4 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -42,7 +42,8 @@
42#define GBE_CONFIG_RAM_BASE \ 42#define GBE_CONFIG_RAM_BASE \
43 ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET)) 43 ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET))
44 44
45#define GBE_CONFIG_BASE_VIRT phys_to_virt(GBE_CONFIG_RAM_BASE) 45#define GBE_CONFIG_BASE_VIRT \
46 ((void __iomem *)phys_to_virt(GBE_CONFIG_RAM_BASE))
46 47
47#define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \ 48#define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \
48 (iowrite16_rep(base + offset, data, count)) 49 (iowrite16_rep(base + offset, data, count))
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 3fa110ddb041..2e5022849f18 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -5967,7 +5967,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
5967 /* APME bit in EEPROM is mapped to WUC.APME */ 5967 /* APME bit in EEPROM is mapped to WUC.APME */
5968 eeprom_data = er32(WUC); 5968 eeprom_data = er32(WUC);
5969 eeprom_apme_mask = E1000_WUC_APME; 5969 eeprom_apme_mask = E1000_WUC_APME;
5970 if (eeprom_data & E1000_WUC_PHY_WAKE) 5970 if ((hw->mac.type > e1000_ich10lan) &&
5971 (eeprom_data & E1000_WUC_PHY_WAKE))
5971 adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP; 5972 adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
5972 } else if (adapter->flags & FLAG_APME_IN_CTRL3) { 5973 } else if (adapter->flags & FLAG_APME_IN_CTRL3) {
5973 if (adapter->flags & FLAG_APME_CHECK_PORT_B && 5974 if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 2a71373719ae..cd0282d5d40f 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -74,7 +74,8 @@ static struct platform_device_id fec_devtype[] = {
74 }, { 74 }, {
75 .name = "imx28-fec", 75 .name = "imx28-fec",
76 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME, 76 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
77 } 77 },
78 { }
78}; 79};
79 80
80static unsigned char macaddr[ETH_ALEN]; 81static unsigned char macaddr[ETH_ALEN];
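
The fec hunk adds the empty terminating entry that table walkers rely on; without it, a lookup that scans for a zeroed sentinel runs off the end of the array. A standalone illustration in plain C with invented names and data:

#include <stdio.h>

struct dev_id {
	const char *name;
	unsigned long driver_data;
};

static const struct dev_id ids[] = {
	{ "demo-fec",  0 },
	{ "demo28-fec", 3 },
	{ NULL, 0 }	/* all-zero sentinel, the role "{ }" plays in the hunk */
};

int main(void)
{
	const struct dev_id *id;

	/* Without the sentinel this loop would walk past the array. */
	for (id = ids; id->name; id++)
		printf("%s -> %lu\n", id->name, id->driver_data);
	return 0;
}
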
diff --git a/drivers/net/igbvf/vf.c b/drivers/net/igbvf/vf.c
index 74486a8b009a..af3822f9ea9a 100644
--- a/drivers/net/igbvf/vf.c
+++ b/drivers/net/igbvf/vf.c
@@ -220,7 +220,7 @@ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr)
220 * The parameter rar_count will usually be hw->mac.rar_entry_count 220 * The parameter rar_count will usually be hw->mac.rar_entry_count
221 * unless there are workarounds that change this. 221 * unless there are workarounds that change this.
222 **/ 222 **/
223void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, 223static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
224 u8 *mc_addr_list, u32 mc_addr_count, 224 u8 *mc_addr_list, u32 mc_addr_count,
225 u32 rar_used_count, u32 rar_count) 225 u32 rar_used_count, u32 rar_count)
226{ 226{
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index f69e73e2191e..79ccb54ab00c 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -260,7 +260,7 @@ static int macb_mii_init(struct macb *bp)
260 for (i = 0; i < PHY_MAX_ADDR; i++) 260 for (i = 0; i < PHY_MAX_ADDR; i++)
261 bp->mii_bus->irq[i] = PHY_POLL; 261 bp->mii_bus->irq[i] = PHY_POLL;
262 262
263 platform_set_drvdata(bp->dev, bp->mii_bus); 263 dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
264 264
265 if (mdiobus_register(bp->mii_bus)) 265 if (mdiobus_register(bp->mii_bus))
266 goto err_out_free_mdio_irq; 266 goto err_out_free_mdio_irq;
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 5933621ac3ff..fc27a9926d9e 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -528,8 +528,9 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q,
528 vnet_hdr_len = q->vnet_hdr_sz; 528 vnet_hdr_len = q->vnet_hdr_sz;
529 529
530 err = -EINVAL; 530 err = -EINVAL;
531 if ((len -= vnet_hdr_len) < 0) 531 if (len < vnet_hdr_len)
532 goto err; 532 goto err;
533 len -= vnet_hdr_len;
533 534
534 err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0, 535 err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0,
535 sizeof(vnet_hdr)); 536 sizeof(vnet_hdr));
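
The macvtap fix replaces subtract-then-test with test-then-subtract, so a header longer than the buffer is rejected instead of wrapping an unsigned length. A standalone illustration:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t len = 4, vnet_hdr_len = 10;

	/* Old pattern: subtract first, then test.  With an unsigned
	 * length the "< 0" test can never fire and len wraps around. */
	size_t wrapped = len - vnet_hdr_len;
	printf("subtract-then-test leaves %zu bytes\n", wrapped);

	/* Fixed pattern from the hunk above: test before subtracting. */
	if (len < vnet_hdr_len)
		printf("test-then-subtract rejects the short buffer\n");
	else
		printf("payload is %zu bytes\n", len - vnet_hdr_len);

	return 0;
}
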
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 9226cda4d054..530ab5a10bd3 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -691,6 +691,7 @@ static struct pcmcia_device_id fmvj18x_ids[] = {
691 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a), 691 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a),
692 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01), 692 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01),
693 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05), 693 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05),
694 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0b05),
694 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101), 695 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101),
695 PCMCIA_DEVICE_NULL, 696 PCMCIA_DEVICE_NULL,
696}; 697};
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 27e6f6d43cac..e3ebd90ae651 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -49,8 +49,8 @@
49#include <asm/processor.h> 49#include <asm/processor.h>
50 50
51#define DRV_NAME "r6040" 51#define DRV_NAME "r6040"
52#define DRV_VERSION "0.26" 52#define DRV_VERSION "0.27"
53#define DRV_RELDATE "30May2010" 53#define DRV_RELDATE "23Feb2011"
54 54
55/* PHY CHIP Address */ 55/* PHY CHIP Address */
56#define PHY1_ADDR 1 /* For MAC1 */ 56#define PHY1_ADDR 1 /* For MAC1 */
@@ -69,6 +69,8 @@
69 69
70/* MAC registers */ 70/* MAC registers */
71#define MCR0 0x00 /* Control register 0 */ 71#define MCR0 0x00 /* Control register 0 */
72#define MCR0_PROMISC 0x0020 /* Promiscuous mode */
73#define MCR0_HASH_EN 0x0100 /* Enable multicast hash table function */
72#define MCR1 0x04 /* Control register 1 */ 74#define MCR1 0x04 /* Control register 1 */
73#define MAC_RST 0x0001 /* Reset the MAC */ 75#define MAC_RST 0x0001 /* Reset the MAC */
74#define MBCR 0x08 /* Bus control */ 76#define MBCR 0x08 /* Bus control */
@@ -851,77 +853,92 @@ static void r6040_multicast_list(struct net_device *dev)
851{ 853{
852 struct r6040_private *lp = netdev_priv(dev); 854 struct r6040_private *lp = netdev_priv(dev);
853 void __iomem *ioaddr = lp->base; 855 void __iomem *ioaddr = lp->base;
854 u16 *adrp;
855 u16 reg;
856 unsigned long flags; 856 unsigned long flags;
857 struct netdev_hw_addr *ha; 857 struct netdev_hw_addr *ha;
858 int i; 858 int i;
859 u16 *adrp;
860 u16 hash_table[4] = { 0 };
861
862 spin_lock_irqsave(&lp->lock, flags);
859 863
860 /* MAC Address */ 864 /* Keep our MAC Address */
861 adrp = (u16 *)dev->dev_addr; 865 adrp = (u16 *)dev->dev_addr;
862 iowrite16(adrp[0], ioaddr + MID_0L); 866 iowrite16(adrp[0], ioaddr + MID_0L);
863 iowrite16(adrp[1], ioaddr + MID_0M); 867 iowrite16(adrp[1], ioaddr + MID_0M);
864 iowrite16(adrp[2], ioaddr + MID_0H); 868 iowrite16(adrp[2], ioaddr + MID_0H);
865 869
866 /* Promiscous Mode */
867 spin_lock_irqsave(&lp->lock, flags);
868
869 /* Clear AMCP & PROM bits */ 870 /* Clear AMCP & PROM bits */
870 reg = ioread16(ioaddr) & ~0x0120; 871 lp->mcr0 = ioread16(ioaddr + MCR0) & ~(MCR0_PROMISC | MCR0_HASH_EN);
871 if (dev->flags & IFF_PROMISC) {
872 reg |= 0x0020;
873 lp->mcr0 |= 0x0020;
874 }
875 /* Too many multicast addresses
876 * accept all traffic */
877 else if ((netdev_mc_count(dev) > MCAST_MAX) ||
878 (dev->flags & IFF_ALLMULTI))
879 reg |= 0x0020;
880 872
881 iowrite16(reg, ioaddr); 873 /* Promiscuous mode */
882 spin_unlock_irqrestore(&lp->lock, flags); 874 if (dev->flags & IFF_PROMISC)
875 lp->mcr0 |= MCR0_PROMISC;
883 876
884 /* Build the hash table */ 877 /* Enable multicast hash table function to
885 if (netdev_mc_count(dev) > MCAST_MAX) { 878 * receive all multicast packets. */
886 u16 hash_table[4]; 879 else if (dev->flags & IFF_ALLMULTI) {
887 u32 crc; 880 lp->mcr0 |= MCR0_HASH_EN;
888 881
889 for (i = 0; i < 4; i++) 882 for (i = 0; i < MCAST_MAX ; i++) {
890 hash_table[i] = 0; 883 iowrite16(0, ioaddr + MID_1L + 8 * i);
884 iowrite16(0, ioaddr + MID_1M + 8 * i);
885 iowrite16(0, ioaddr + MID_1H + 8 * i);
886 }
891 887
888 for (i = 0; i < 4; i++)
889 hash_table[i] = 0xffff;
890 }
891 /* Use internal multicast address registers if the number of
892 * multicast addresses is not greater than MCAST_MAX. */
893 else if (netdev_mc_count(dev) <= MCAST_MAX) {
894 i = 0;
892 netdev_for_each_mc_addr(ha, dev) { 895 netdev_for_each_mc_addr(ha, dev) {
893 char *addrs = ha->addr; 896 u16 *adrp = (u16 *) ha->addr;
897 iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
898 iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
899 iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
900 i++;
901 }
902 while (i < MCAST_MAX) {
903 iowrite16(0, ioaddr + MID_1L + 8 * i);
904 iowrite16(0, ioaddr + MID_1M + 8 * i);
905 iowrite16(0, ioaddr + MID_1H + 8 * i);
906 i++;
907 }
908 }
909 /* Otherwise, Enable multicast hash table function. */
910 else {
911 u32 crc;
894 912
895 if (!(*addrs & 1)) 913 lp->mcr0 |= MCR0_HASH_EN;
896 continue; 914
915 for (i = 0; i < MCAST_MAX ; i++) {
916 iowrite16(0, ioaddr + MID_1L + 8 * i);
917 iowrite16(0, ioaddr + MID_1M + 8 * i);
918 iowrite16(0, ioaddr + MID_1H + 8 * i);
919 }
897 920
898 crc = ether_crc_le(6, addrs); 921 /* Build multicast hash table */
922 netdev_for_each_mc_addr(ha, dev) {
923 u8 *addrs = ha->addr;
924
925 crc = ether_crc(ETH_ALEN, addrs);
899 crc >>= 26; 926 crc >>= 26;
900 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); 927 hash_table[crc >> 4] |= 1 << (crc & 0xf);
901 } 928 }
902 /* Fill the MAC hash tables with their values */ 929 }
930
931 iowrite16(lp->mcr0, ioaddr + MCR0);
932
933 /* Fill the MAC hash tables with their values */
 934 if (lp->mcr0 & MCR0_HASH_EN) {
903 iowrite16(hash_table[0], ioaddr + MAR0); 935 iowrite16(hash_table[0], ioaddr + MAR0);
904 iowrite16(hash_table[1], ioaddr + MAR1); 936 iowrite16(hash_table[1], ioaddr + MAR1);
905 iowrite16(hash_table[2], ioaddr + MAR2); 937 iowrite16(hash_table[2], ioaddr + MAR2);
906 iowrite16(hash_table[3], ioaddr + MAR3); 938 iowrite16(hash_table[3], ioaddr + MAR3);
907 } 939 }
908 /* Multicast Address 1~4 case */ 940
909 i = 0; 941 spin_unlock_irqrestore(&lp->lock, flags);
910 netdev_for_each_mc_addr(ha, dev) {
911 if (i >= MCAST_MAX)
912 break;
913 adrp = (u16 *) ha->addr;
914 iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
915 iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
916 iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
917 i++;
918 }
919 while (i < MCAST_MAX) {
920 iowrite16(0xffff, ioaddr + MID_1L + 8 * i);
921 iowrite16(0xffff, ioaddr + MID_1M + 8 * i);
922 iowrite16(0xffff, ioaddr + MID_1H + 8 * i);
923 i++;
924 }
925} 942}
926 943
927static void netdev_get_drvinfo(struct net_device *dev, 944static void netdev_get_drvinfo(struct net_device *dev,
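
The r6040 rework takes lp->lock around the whole update, programs the MID_1* registers directly when the list fits in MCAST_MAX entries, and otherwise enables MCR0_HASH_EN and sets one bit per address in the MAR0..MAR3 hash words (tested with a bitwise & of the flag). A standalone sketch of the hash side only, using the usual big-endian Ethernet CRC and made-up multicast addresses; only the >> 26 bin selection and bit placement mirror the hunk:

#include <stdio.h>
#include <stdint.h>

#define CRC32_POLY_BE 0x04c11db7u

/* Bit-serial big-endian CRC-32 in the style of ether_crc(). */
static uint32_t ether_crc_be(int len, const uint8_t *data)
{
	uint32_t crc = 0xffffffffu;
	int bit;

	while (len-- > 0) {
		uint8_t octet = *data++;
		for (bit = 0; bit < 8; bit++, octet >>= 1)
			crc = (crc << 1) ^
			      (((crc >> 31) ^ (octet & 1)) ? CRC32_POLY_BE : 0);
	}
	return crc;
}

int main(void)
{
	static const uint8_t mc[][6] = {
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
		{ 0x01, 0x00, 0x5e, 0x7f, 0xff, 0xfa },
	};
	uint16_t hash_table[4] = { 0 };
	unsigned int i, crc;

	for (i = 0; i < 2; i++) {
		crc = ether_crc_be(6, mc[i]) >> 26;	/* top 6 bits pick the bin */
		hash_table[crc >> 4] |= 1u << (crc & 0xf);
	}
	for (i = 0; i < 4; i++)
		printf("MAR%u = 0x%04x\n", i, hash_table[i]);	/* values for MAR0..MAR3 */
	return 0;
}
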
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index ef2133b16f8c..7ffdb80adf40 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -25,6 +25,7 @@
25#include <linux/dma-mapping.h> 25#include <linux/dma-mapping.h>
26#include <linux/pm_runtime.h> 26#include <linux/pm_runtime.h>
27#include <linux/firmware.h> 27#include <linux/firmware.h>
28#include <linux/pci-aspm.h>
28 29
29#include <asm/system.h> 30#include <asm/system.h>
30#include <asm/io.h> 31#include <asm/io.h>
@@ -3020,6 +3021,11 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3020 mii->reg_num_mask = 0x1f; 3021 mii->reg_num_mask = 0x1f;
3021 mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII); 3022 mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
3022 3023
3024 /* disable ASPM completely as that causes random device stop-working
3025 * problems as well as full system hangs for some PCIe device users */
3026 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
3027 PCIE_LINK_STATE_CLKPM);
3028
3023 /* enable device (incl. PCI PM wakeup and hotplug setup) */ 3029 /* enable device (incl. PCI PM wakeup and hotplug setup) */
3024 rc = pci_enable_device(pdev); 3030 rc = pci_enable_device(pdev);
3025 if (rc < 0) { 3031 if (rc < 0) {
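
The r8169 hunk opts the device out of ASPM before pci_enable_device(), since the hardware is reported to hang when link power-state transitions are active. A minimal probe-time fragment of that pattern; my_probe is a placeholder and the code is not compiled standalone:

#include <linux/pci.h>
#include <linux/pci-aspm.h>

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int rc;

	/* Turn off L0s/L1/clock-PM before the device is enabled. */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
				     PCIE_LINK_STATE_L1 |
				     PCIE_LINK_STATE_CLKPM);

	rc = pci_enable_device(pdev);
	if (rc < 0)
		return rc;
	/* ... rest of probe ... */
	return 0;
}
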
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 42daf98ba736..35b28f42d208 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3856,9 +3856,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
3856 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN); 3856 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
3857 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 3857 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
3858 3858
3859 /* device is off until link detection */
3860 netif_carrier_off(dev);
3861
3862 return dev; 3859 return dev;
3863} 3860}
3864 3861
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 64bfdae5956f..d70bde95460b 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -1178,6 +1178,11 @@ static int smsc911x_open(struct net_device *dev)
1178 smsc911x_reg_write(pdata, HW_CFG, 0x00050000); 1178 smsc911x_reg_write(pdata, HW_CFG, 0x00050000);
1179 smsc911x_reg_write(pdata, AFC_CFG, 0x006E3740); 1179 smsc911x_reg_write(pdata, AFC_CFG, 0x006E3740);
1180 1180
1181 /* Increase the legal frame size of VLAN tagged frames to 1522 bytes */
1182 spin_lock_irq(&pdata->mac_lock);
1183 smsc911x_mac_write(pdata, VLAN1, ETH_P_8021Q);
1184 spin_unlock_irq(&pdata->mac_lock);
1185
1181 /* Make sure EEPROM has finished loading before setting GPIO_CFG */ 1186 /* Make sure EEPROM has finished loading before setting GPIO_CFG */
1182 timeout = 50; 1187 timeout = 50;
1183 while ((smsc911x_reg_read(pdata, E2P_CMD) & E2P_CMD_EPC_BUSY_) && 1188 while ((smsc911x_reg_read(pdata, E2P_CMD) & E2P_CMD_EPC_BUSY_) &&
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 5ab3084eb9cb..07b1633b7f3f 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -219,8 +219,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev)
219 struct tx_buf *tx_buf = NULL; 219 struct tx_buf *tx_buf = NULL;
220 struct sk_buff *nskb = NULL; 220 struct sk_buff *nskb = NULL;
221 int ret = 0, i; 221 int ret = 0, i;
222 u16 *hdr, tx_skb_cnt = 0; 222 u16 tx_skb_cnt = 0;
223 u8 *buf; 223 u8 *buf;
224 __le16 *hdr;
224 225
225 if (hif_dev->tx.tx_skb_cnt == 0) 226 if (hif_dev->tx.tx_skb_cnt == 0)
226 return 0; 227 return 0;
@@ -245,9 +246,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev)
245 246
246 buf = tx_buf->buf; 247 buf = tx_buf->buf;
247 buf += tx_buf->offset; 248 buf += tx_buf->offset;
248 hdr = (u16 *)buf; 249 hdr = (__le16 *)buf;
249 *hdr++ = nskb->len; 250 *hdr++ = cpu_to_le16(nskb->len);
250 *hdr++ = ATH_USB_TX_STREAM_MODE_TAG; 251 *hdr++ = cpu_to_le16(ATH_USB_TX_STREAM_MODE_TAG);
251 buf += 4; 252 buf += 4;
252 memcpy(buf, nskb->data, nskb->len); 253 memcpy(buf, nskb->data, nskb->len);
253 tx_buf->len = nskb->len + 4; 254 tx_buf->len = nskb->len + 4;
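
The hif_usb change declares the stream-mode header as __le16 and converts with cpu_to_le16(), so the on-wire format stays little-endian on big-endian hosts too. A small fragment of that shape, with a placeholder function name and not compiled standalone:

#include <linux/types.h>

static void my_put_stream_header(u8 *buf, u16 payload_len, u16 tag)
{
	__le16 *hdr = (__le16 *)buf;

	*hdr++ = cpu_to_le16(payload_len);	/* fixed little-endian on the wire */
	*hdr = cpu_to_le16(tag);
}
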
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 180170d3ce25..2915b11edefb 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -885,7 +885,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
885 struct ath_common *common = ath9k_hw_common(ah); 885 struct ath_common *common = ath9k_hw_common(ah);
886 886
887 if (!(ints & ATH9K_INT_GLOBAL)) 887 if (!(ints & ATH9K_INT_GLOBAL))
888 ath9k_hw_enable_interrupts(ah); 888 ath9k_hw_disable_interrupts(ah);
889 889
890 ath_dbg(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints); 890 ath_dbg(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);
891 891
@@ -963,7 +963,8 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
963 REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER); 963 REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
964 } 964 }
965 965
966 ath9k_hw_enable_interrupts(ah); 966 if (ints & ATH9K_INT_GLOBAL)
967 ath9k_hw_enable_interrupts(ah);
967 968
968 return; 969 return;
969} 970}
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index 537732e5964f..f82c400be288 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -118,6 +118,8 @@ static struct usb_device_id carl9170_usb_ids[] = {
118 { USB_DEVICE(0x057c, 0x8402) }, 118 { USB_DEVICE(0x057c, 0x8402) },
119 /* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */ 119 /* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */
120 { USB_DEVICE(0x1668, 0x1200) }, 120 { USB_DEVICE(0x1668, 0x1200) },
121 /* Airlive X.USB a/b/g/n */
122 { USB_DEVICE(0x1b75, 0x9170) },
121 123
122 /* terminate */ 124 /* terminate */
123 {} 125 {}
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 79ab0a6b1386..537fb8c84e3a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -51,7 +51,7 @@
51#include "iwl-agn-debugfs.h" 51#include "iwl-agn-debugfs.h"
52 52
53/* Highest firmware API version supported */ 53/* Highest firmware API version supported */
54#define IWL5000_UCODE_API_MAX 2 54#define IWL5000_UCODE_API_MAX 5
55#define IWL5150_UCODE_API_MAX 2 55#define IWL5150_UCODE_API_MAX 2
56 56
57/* Lowest firmware API version supported */ 57/* Lowest firmware API version supported */
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 21713a7638c4..9b344a921e74 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -98,6 +98,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
98 {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */ 98 {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */
99 {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */ 99 {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */
100 {USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */ 100 {USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */
101 {USB_DEVICE(0x1740, 0x1000)}, /* Senao NUB-350 */
101 {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */ 102 {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */
102 {USB_DEVICE(0x2001, 0x3705)}, /* D-Link DWL-G120 rev C1 */ 103 {USB_DEVICE(0x2001, 0x3705)}, /* D-Link DWL-G120 rev C1 */
103 {USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */ 104 {USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 848cc2cce247..518542b4bf9e 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2597,6 +2597,9 @@ static int rndis_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
2597 __le32 mode; 2597 __le32 mode;
2598 int ret; 2598 int ret;
2599 2599
2600 if (priv->device_type != RNDIS_BCM4320B)
2601 return -ENOTSUPP;
2602
2600 netdev_dbg(usbdev->net, "%s(): %s, %d\n", __func__, 2603 netdev_dbg(usbdev->net, "%s(): %s, %d\n", __func__,
2601 enabled ? "enabled" : "disabled", 2604 enabled ? "enabled" : "disabled",
2602 timeout); 2605 timeout);
diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c
index 28295d0a50f6..4d87b5dc9284 100644
--- a/drivers/of/pdt.c
+++ b/drivers/of/pdt.c
@@ -36,19 +36,55 @@ unsigned int of_pdt_unique_id __initdata;
36 (p)->unique_id = of_pdt_unique_id++; \ 36 (p)->unique_id = of_pdt_unique_id++; \
37} while (0) 37} while (0)
38 38
39static inline const char *of_pdt_node_name(struct device_node *dp) 39static char * __init of_pdt_build_full_name(struct device_node *dp)
40{ 40{
41 return dp->path_component_name; 41 int len, ourlen, plen;
42 char *n;
43
44 dp->path_component_name = build_path_component(dp);
45
46 plen = strlen(dp->parent->full_name);
47 ourlen = strlen(dp->path_component_name);
48 len = ourlen + plen + 2;
49
50 n = prom_early_alloc(len);
51 strcpy(n, dp->parent->full_name);
52 if (!of_node_is_root(dp->parent)) {
53 strcpy(n + plen, "/");
54 plen++;
55 }
56 strcpy(n + plen, dp->path_component_name);
57
58 return n;
42} 59}
43 60
44#else 61#else /* CONFIG_SPARC */
45 62
46static inline void of_pdt_incr_unique_id(void *p) { } 63static inline void of_pdt_incr_unique_id(void *p) { }
47static inline void irq_trans_init(struct device_node *dp) { } 64static inline void irq_trans_init(struct device_node *dp) { }
48 65
49static inline const char *of_pdt_node_name(struct device_node *dp) 66static char * __init of_pdt_build_full_name(struct device_node *dp)
50{ 67{
51 return dp->name; 68 static int failsafe_id = 0; /* for generating unique names on failure */
69 char *buf;
70 int len;
71
72 if (of_pdt_prom_ops->pkg2path(dp->phandle, NULL, 0, &len))
73 goto failsafe;
74
75 buf = prom_early_alloc(len + 1);
76 if (of_pdt_prom_ops->pkg2path(dp->phandle, buf, len, &len))
77 goto failsafe;
78 return buf;
79
80 failsafe:
81 buf = prom_early_alloc(strlen(dp->parent->full_name) +
82 strlen(dp->name) + 16);
83 sprintf(buf, "%s/%s@unknown%i",
84 of_node_is_root(dp->parent) ? "" : dp->parent->full_name,
85 dp->name, failsafe_id++);
86 pr_err("%s: pkg2path failed; assigning %s\n", __func__, buf);
87 return buf;
52} 88}
53 89
54#endif /* !CONFIG_SPARC */ 90#endif /* !CONFIG_SPARC */
@@ -132,47 +168,6 @@ static char * __init of_pdt_get_one_property(phandle node, const char *name)
132 return buf; 168 return buf;
133} 169}
134 170
135static char * __init of_pdt_try_pkg2path(phandle node)
136{
137 char *res, *buf = NULL;
138 int len;
139
140 if (!of_pdt_prom_ops->pkg2path)
141 return NULL;
142
143 if (of_pdt_prom_ops->pkg2path(node, buf, 0, &len))
144 return NULL;
145 buf = prom_early_alloc(len + 1);
146 if (of_pdt_prom_ops->pkg2path(node, buf, len, &len)) {
147 pr_err("%s: package-to-path failed\n", __func__);
148 return NULL;
149 }
150
151 res = strrchr(buf, '/');
152 if (!res) {
153 pr_err("%s: couldn't find / in %s\n", __func__, buf);
154 return NULL;
155 }
156 return res+1;
157}
158
159/*
160 * When fetching the node's name, first try using package-to-path; if
161 * that fails (either because the arch hasn't supplied a PROM callback,
162 * or some other random failure), fall back to just looking at the node's
163 * 'name' property.
164 */
165static char * __init of_pdt_build_name(phandle node)
166{
167 char *buf;
168
169 buf = of_pdt_try_pkg2path(node);
170 if (!buf)
171 buf = of_pdt_get_one_property(node, "name");
172
173 return buf;
174}
175
176static struct device_node * __init of_pdt_create_node(phandle node, 171static struct device_node * __init of_pdt_create_node(phandle node,
177 struct device_node *parent) 172 struct device_node *parent)
178{ 173{
@@ -187,7 +182,7 @@ static struct device_node * __init of_pdt_create_node(phandle node,
187 182
188 kref_init(&dp->kref); 183 kref_init(&dp->kref);
189 184
190 dp->name = of_pdt_build_name(node); 185 dp->name = of_pdt_get_one_property(node, "name");
191 dp->type = of_pdt_get_one_property(node, "device_type"); 186 dp->type = of_pdt_get_one_property(node, "device_type");
192 dp->phandle = node; 187 dp->phandle = node;
193 188
@@ -198,26 +193,6 @@ static struct device_node * __init of_pdt_create_node(phandle node,
198 return dp; 193 return dp;
199} 194}
200 195
201static char * __init of_pdt_build_full_name(struct device_node *dp)
202{
203 int len, ourlen, plen;
204 char *n;
205
206 plen = strlen(dp->parent->full_name);
207 ourlen = strlen(of_pdt_node_name(dp));
208 len = ourlen + plen + 2;
209
210 n = prom_early_alloc(len);
211 strcpy(n, dp->parent->full_name);
212 if (!of_node_is_root(dp->parent)) {
213 strcpy(n + plen, "/");
214 plen++;
215 }
216 strcpy(n + plen, of_pdt_node_name(dp));
217
218 return n;
219}
220
221static struct device_node * __init of_pdt_build_tree(struct device_node *parent, 196static struct device_node * __init of_pdt_build_tree(struct device_node *parent,
222 phandle node, 197 phandle node,
223 struct device_node ***nextp) 198 struct device_node ***nextp)
@@ -240,9 +215,6 @@ static struct device_node * __init of_pdt_build_tree(struct device_node *parent,
240 *(*nextp) = dp; 215 *(*nextp) = dp;
241 *nextp = &dp->allnext; 216 *nextp = &dp->allnext;
242 217
243#if defined(CONFIG_SPARC)
244 dp->path_component_name = build_path_component(dp);
245#endif
246 dp->full_name = of_pdt_build_full_name(dp); 218 dp->full_name = of_pdt_build_full_name(dp);
247 219
248 dp->child = of_pdt_build_tree(dp, 220 dp->child = of_pdt_build_tree(dp,
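
The of/pdt refactor builds each node's full_name directly: on SPARC from the parent's full_name plus the path component, elsewhere from the PROM pkg2path call with a unique failsafe name when that call fails. A standalone illustration of the join step only, as userspace C with an invented helper:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *build_full_name(const char *parent, const char *component,
			     int parent_is_root)
{
	size_t len = strlen(parent) + strlen(component) + 2;
	char *n = malloc(len);

	if (!n)
		return NULL;
	strcpy(n, parent);
	if (!parent_is_root)
		strcat(n, "/");	/* the root name already ends in '/' */
	strcat(n, component);
	return n;
}

int main(void)
{
	char *a = build_full_name("/", "cpus", 1);
	char *b = build_full_name("/cpus", "cpu@0", 0);

	printf("%s\n%s\n", a, b);	/* "/cpus" and "/cpus/cpu@0" */
	free(a);
	free(b);
	return 0;
}
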
diff --git a/drivers/pcmcia/pxa2xx_colibri.c b/drivers/pcmcia/pxa2xx_colibri.c
index c3f72192af66..a52039564e74 100644
--- a/drivers/pcmcia/pxa2xx_colibri.c
+++ b/drivers/pcmcia/pxa2xx_colibri.c
@@ -181,6 +181,9 @@ static int __init colibri_pcmcia_init(void)
181{ 181{
182 int ret; 182 int ret;
183 183
184 if (!machine_is_colibri() && !machine_is_colibri320())
185 return -ENODEV;
186
184 colibri_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1); 187 colibri_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
185 if (!colibri_pcmcia_device) 188 if (!colibri_pcmcia_device)
186 return -ENOMEM; 189 return -ENOMEM;
diff --git a/drivers/pps/generators/Kconfig b/drivers/pps/generators/Kconfig
index f3a73dd77660..e4c4f3dc0728 100644
--- a/drivers/pps/generators/Kconfig
+++ b/drivers/pps/generators/Kconfig
@@ -6,7 +6,7 @@ comment "PPS generators support"
6 6
7config PPS_GENERATOR_PARPORT 7config PPS_GENERATOR_PARPORT
8 tristate "Parallel port PPS signal generator" 8 tristate "Parallel port PPS signal generator"
9 depends on PARPORT 9 depends on PARPORT && BROKEN
10 help 10 help
11 If you say yes here you get support for a PPS signal generator which 11 If you say yes here you get support for a PPS signal generator which
12 utilizes STROBE pin of a parallel port to send PPS signals. It uses 12 utilizes STROBE pin of a parallel port to send PPS signals. It uses
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index cf953ecbfca9..b80fa2882408 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -77,18 +77,20 @@ static irqreturn_t s3c_rtc_tickirq(int irq, void *id)
77} 77}
78 78
79/* Update control registers */ 79/* Update control registers */
80static void s3c_rtc_setaie(int to) 80static int s3c_rtc_setaie(struct device *dev, unsigned int enabled)
81{ 81{
82 unsigned int tmp; 82 unsigned int tmp;
83 83
84 pr_debug("%s: aie=%d\n", __func__, to); 84 pr_debug("%s: aie=%d\n", __func__, enabled);
85 85
86 tmp = readb(s3c_rtc_base + S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN; 86 tmp = readb(s3c_rtc_base + S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN;
87 87
88 if (to) 88 if (enabled)
89 tmp |= S3C2410_RTCALM_ALMEN; 89 tmp |= S3C2410_RTCALM_ALMEN;
90 90
91 writeb(tmp, s3c_rtc_base + S3C2410_RTCALM); 91 writeb(tmp, s3c_rtc_base + S3C2410_RTCALM);
92
93 return 0;
92} 94}
93 95
94static int s3c_rtc_setpie(struct device *dev, int enabled) 96static int s3c_rtc_setpie(struct device *dev, int enabled)
@@ -308,7 +310,7 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
308 310
309 writeb(alrm_en, base + S3C2410_RTCALM); 311 writeb(alrm_en, base + S3C2410_RTCALM);
310 312
311 s3c_rtc_setaie(alrm->enabled); 313 s3c_rtc_setaie(dev, alrm->enabled);
312 314
313 return 0; 315 return 0;
314} 316}
@@ -440,7 +442,7 @@ static int __devexit s3c_rtc_remove(struct platform_device *dev)
440 rtc_device_unregister(rtc); 442 rtc_device_unregister(rtc);
441 443
442 s3c_rtc_setpie(&dev->dev, 0); 444 s3c_rtc_setpie(&dev->dev, 0);
443 s3c_rtc_setaie(0); 445 s3c_rtc_setaie(&dev->dev, 0);
444 446
445 clk_disable(rtc_clk); 447 clk_disable(rtc_clk);
446 clk_put(rtc_clk); 448 clk_put(rtc_clk);
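
The rtc-s3c change gives s3c_rtc_setaie() the (struct device *, unsigned int) shape and an int return, the form used for an rtc_class_ops alarm-enable callback. A minimal fragment of that callback shape; the names are placeholders, the code is not compiled standalone, and the ops wiring is an assumption rather than something shown in this hunk:

#include <linux/rtc.h>

static int my_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
	/* set or clear the hardware's alarm-enable bit here */
	return 0;
}

static const struct rtc_class_ops my_rtc_ops = {
	.alarm_irq_enable	= my_rtc_alarm_irq_enable,
	/* .read_time, .set_time, .read_alarm, .set_alarm, ... */
};
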
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index c881a14fa5dd..1f6a4d894e73 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -62,8 +62,8 @@ static int xpram_devs;
62/* 62/*
63 * Parameter parsing functions. 63 * Parameter parsing functions.
64 */ 64 */
65static int __initdata devs = XPRAM_DEVS; 65static int devs = XPRAM_DEVS;
66static char __initdata *sizes[XPRAM_MAX_DEVS]; 66static char *sizes[XPRAM_MAX_DEVS];
67 67
68module_param(devs, int, 0); 68module_param(devs, int, 0);
69module_param_array(sizes, charp, NULL, 0); 69module_param_array(sizes, charp, NULL, 0);
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index 8cd58e412b5e..5ad44daef73b 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -460,7 +460,8 @@ kbd_ioctl(struct kbd_data *kbd, struct file *file,
460 unsigned int cmd, unsigned long arg) 460 unsigned int cmd, unsigned long arg)
461{ 461{
462 void __user *argp; 462 void __user *argp;
463 int ct, perm; 463 unsigned int ct;
464 int perm;
464 465
465 argp = (void __user *)arg; 466 argp = (void __user *)arg;
466 467
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index 7a242f073632..267b54e8ff5a 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -280,6 +280,14 @@ tape_do_io_free(struct tape_device *device, struct tape_request *request)
280 return rc; 280 return rc;
281} 281}
282 282
283static inline void
284tape_do_io_async_free(struct tape_device *device, struct tape_request *request)
285{
286 request->callback = (void *) tape_free_request;
287 request->callback_data = NULL;
288 tape_do_io_async(device, request);
289}
290
283extern int tape_oper_handler(int irq, int status); 291extern int tape_oper_handler(int irq, int status);
284extern void tape_noper_handler(int irq, int status); 292extern void tape_noper_handler(int irq, int status);
285extern int tape_open(struct tape_device *); 293extern int tape_open(struct tape_device *);
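
The new tape_do_io_async_free() helper submits a request asynchronously and lets the completion callback free it, so callers that must not sleep can fire and forget. A generic fragment of the same idea; every name here is a placeholder:

struct my_request {
	void (*callback)(struct my_request *req, void *data);
	void *callback_data;
	/* ... channel program ... */
};

static void my_free_request_cb(struct my_request *req, void *data)
{
	my_free_request(req);
}

static void my_do_io_async_free(struct my_device *dev, struct my_request *req)
{
	req->callback = my_free_request_cb;
	req->callback_data = NULL;
	my_do_io_async(dev, req);	/* returns at once; the callback frees req */
}
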
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index c17f35b6136a..c26511171ffe 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -53,23 +53,11 @@ static void tape_34xx_delete_sbid_from(struct tape_device *, int);
53 * Medium sense for 34xx tapes. There is no 'real' medium sense call. 53 * Medium sense for 34xx tapes. There is no 'real' medium sense call.
54 * So we just do a normal sense. 54 * So we just do a normal sense.
55 */ 55 */
56static int 56static void __tape_34xx_medium_sense(struct tape_request *request)
57tape_34xx_medium_sense(struct tape_device *device)
58{ 57{
59 struct tape_request *request; 58 struct tape_device *device = request->device;
60 unsigned char *sense; 59 unsigned char *sense;
61 int rc;
62
63 request = tape_alloc_request(1, 32);
64 if (IS_ERR(request)) {
65 DBF_EXCEPTION(6, "MSEN fail\n");
66 return PTR_ERR(request);
67 }
68
69 request->op = TO_MSEN;
70 tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
71 60
72 rc = tape_do_io_interruptible(device, request);
73 if (request->rc == 0) { 61 if (request->rc == 0) {
74 sense = request->cpdata; 62 sense = request->cpdata;
75 63
@@ -88,15 +76,47 @@ tape_34xx_medium_sense(struct tape_device *device)
88 device->tape_generic_status |= GMT_WR_PROT(~0); 76 device->tape_generic_status |= GMT_WR_PROT(~0);
89 else 77 else
90 device->tape_generic_status &= ~GMT_WR_PROT(~0); 78 device->tape_generic_status &= ~GMT_WR_PROT(~0);
91 } else { 79 } else
92 DBF_EVENT(4, "tape_34xx: medium sense failed with rc=%d\n", 80 DBF_EVENT(4, "tape_34xx: medium sense failed with rc=%d\n",
93 request->rc); 81 request->rc);
94 }
95 tape_free_request(request); 82 tape_free_request(request);
83}
84
85static int tape_34xx_medium_sense(struct tape_device *device)
86{
87 struct tape_request *request;
88 int rc;
89
90 request = tape_alloc_request(1, 32);
91 if (IS_ERR(request)) {
92 DBF_EXCEPTION(6, "MSEN fail\n");
93 return PTR_ERR(request);
94 }
96 95
96 request->op = TO_MSEN;
97 tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
98 rc = tape_do_io_interruptible(device, request);
99 __tape_34xx_medium_sense(request);
97 return rc; 100 return rc;
98} 101}
99 102
103static void tape_34xx_medium_sense_async(struct tape_device *device)
104{
105 struct tape_request *request;
106
107 request = tape_alloc_request(1, 32);
108 if (IS_ERR(request)) {
109 DBF_EXCEPTION(6, "MSEN fail\n");
110 return;
111 }
112
113 request->op = TO_MSEN;
114 tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
115 request->callback = (void *) __tape_34xx_medium_sense;
116 request->callback_data = NULL;
117 tape_do_io_async(device, request);
118}
119
100struct tape_34xx_work { 120struct tape_34xx_work {
101 struct tape_device *device; 121 struct tape_device *device;
102 enum tape_op op; 122 enum tape_op op;
@@ -109,6 +129,9 @@ struct tape_34xx_work {
109 * is inserted but cannot call tape_do_io* from an interrupt context. 129 * is inserted but cannot call tape_do_io* from an interrupt context.
110 * Maybe that's useful for other actions we want to start from the 130 * Maybe that's useful for other actions we want to start from the
111 * interrupt handler. 131 * interrupt handler.
132 * Note: the work handler is called by the system work queue. The tape
133 * commands started by the handler need to be asynchronous, otherwise
134 * a deadlock can occur e.g. in case of a deferred cc=1 (see __tape_do_irq).
112 */ 135 */
113static void 136static void
114tape_34xx_work_handler(struct work_struct *work) 137tape_34xx_work_handler(struct work_struct *work)
@@ -119,7 +142,7 @@ tape_34xx_work_handler(struct work_struct *work)
119 142
120 switch(p->op) { 143 switch(p->op) {
121 case TO_MSEN: 144 case TO_MSEN:
122 tape_34xx_medium_sense(device); 145 tape_34xx_medium_sense_async(device);
123 break; 146 break;
124 default: 147 default:
125 DBF_EVENT(3, "T34XX: internal error: unknown work\n"); 148 DBF_EVENT(3, "T34XX: internal error: unknown work\n");
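
The tape_34xx hunk splits medium sense into a synchronous variant and an asynchronous one driven by a completion callback; the added comment explains why: the work handler runs on the system workqueue, and a blocking channel command there can deadlock on a deferred cc=1. A generic sketch of such a sync/async split, with placeholder names and not compiled standalone:

static struct my_request *my_build_sense(struct my_device *dev)
{
	struct my_request *req = my_alloc_request();

	if (IS_ERR(req))
		return req;
	/* fill in the channel program here */
	return req;
}

static void my_sense_done(struct my_request *req)
{
	/* evaluate req->rc and the returned sense data, then free */
	my_free_request(req);
}

static int my_sense_sync(struct my_device *dev)
{
	struct my_request *req = my_build_sense(dev);
	int rc;

	if (IS_ERR(req))
		return PTR_ERR(req);
	rc = my_do_io_interruptible(dev, req);	/* may sleep */
	my_sense_done(req);
	return rc;
}

static void my_sense_async(struct my_device *dev)
{
	struct my_request *req = my_build_sense(dev);

	if (IS_ERR(req))
		return;
	req->callback = (void *)my_sense_done;	/* completion path does the work */
	req->callback_data = NULL;
	my_do_io_async(dev, req);		/* never blocks the work handler */
}
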
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index fbe361fcd2c0..de2e99e0a71b 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -329,17 +329,17 @@ out:
329/* 329/*
330 * Enable encryption 330 * Enable encryption
331 */ 331 */
332static int tape_3592_enable_crypt(struct tape_device *device) 332static struct tape_request *__tape_3592_enable_crypt(struct tape_device *device)
333{ 333{
334 struct tape_request *request; 334 struct tape_request *request;
335 char *data; 335 char *data;
336 336
337 DBF_EVENT(6, "tape_3592_enable_crypt\n"); 337 DBF_EVENT(6, "tape_3592_enable_crypt\n");
338 if (!crypt_supported(device)) 338 if (!crypt_supported(device))
339 return -ENOSYS; 339 return ERR_PTR(-ENOSYS);
340 request = tape_alloc_request(2, 72); 340 request = tape_alloc_request(2, 72);
341 if (IS_ERR(request)) 341 if (IS_ERR(request))
342 return PTR_ERR(request); 342 return request;
343 data = request->cpdata; 343 data = request->cpdata;
344 memset(data,0,72); 344 memset(data,0,72);
345 345
@@ -354,23 +354,42 @@ static int tape_3592_enable_crypt(struct tape_device *device)
354 request->op = TO_CRYPT_ON; 354 request->op = TO_CRYPT_ON;
355 tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data); 355 tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
356 tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36); 356 tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
357 return request;
358}
359
360static int tape_3592_enable_crypt(struct tape_device *device)
361{
362 struct tape_request *request;
363
364 request = __tape_3592_enable_crypt(device);
365 if (IS_ERR(request))
366 return PTR_ERR(request);
357 return tape_do_io_free(device, request); 367 return tape_do_io_free(device, request);
358} 368}
359 369
370static void tape_3592_enable_crypt_async(struct tape_device *device)
371{
372 struct tape_request *request;
373
374 request = __tape_3592_enable_crypt(device);
375 if (!IS_ERR(request))
376 tape_do_io_async_free(device, request);
377}
378
360/* 379/*
361 * Disable encryption 380 * Disable encryption
362 */ 381 */
363static int tape_3592_disable_crypt(struct tape_device *device) 382static struct tape_request *__tape_3592_disable_crypt(struct tape_device *device)
364{ 383{
365 struct tape_request *request; 384 struct tape_request *request;
366 char *data; 385 char *data;
367 386
368 DBF_EVENT(6, "tape_3592_disable_crypt\n"); 387 DBF_EVENT(6, "tape_3592_disable_crypt\n");
369 if (!crypt_supported(device)) 388 if (!crypt_supported(device))
370 return -ENOSYS; 389 return ERR_PTR(-ENOSYS);
371 request = tape_alloc_request(2, 72); 390 request = tape_alloc_request(2, 72);
372 if (IS_ERR(request)) 391 if (IS_ERR(request))
373 return PTR_ERR(request); 392 return request;
374 data = request->cpdata; 393 data = request->cpdata;
375 memset(data,0,72); 394 memset(data,0,72);
376 395
@@ -383,9 +402,28 @@ static int tape_3592_disable_crypt(struct tape_device *device)
383 tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data); 402 tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
384 tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36); 403 tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
385 404
405 return request;
406}
407
408static int tape_3592_disable_crypt(struct tape_device *device)
409{
410 struct tape_request *request;
411
412 request = __tape_3592_disable_crypt(device);
413 if (IS_ERR(request))
414 return PTR_ERR(request);
386 return tape_do_io_free(device, request); 415 return tape_do_io_free(device, request);
387} 416}
388 417
418static void tape_3592_disable_crypt_async(struct tape_device *device)
419{
420 struct tape_request *request;
421
422 request = __tape_3592_disable_crypt(device);
423 if (!IS_ERR(request))
424 tape_do_io_async_free(device, request);
425}
426
389/* 427/*
390 * IOCTL: Set encryption status 428 * IOCTL: Set encryption status
391 */ 429 */
@@ -457,8 +495,7 @@ tape_3590_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg)
457/* 495/*
458 * SENSE Medium: Get Sense data about medium state 496 * SENSE Medium: Get Sense data about medium state
459 */ 497 */
460static int 498static int tape_3590_sense_medium(struct tape_device *device)
461tape_3590_sense_medium(struct tape_device *device)
462{ 499{
463 struct tape_request *request; 500 struct tape_request *request;
464 501
@@ -470,6 +507,18 @@ tape_3590_sense_medium(struct tape_device *device)
470 return tape_do_io_free(device, request); 507 return tape_do_io_free(device, request);
471} 508}
472 509
510static void tape_3590_sense_medium_async(struct tape_device *device)
511{
512 struct tape_request *request;
513
514 request = tape_alloc_request(1, 128);
515 if (IS_ERR(request))
516 return;
517 request->op = TO_MSEN;
518 tape_ccw_end(request->cpaddr, MEDIUM_SENSE, 128, request->cpdata);
519 tape_do_io_async_free(device, request);
520}
521
473/* 522/*
474 * MTTELL: Tell block. Return the number of block relative to current file. 523 * MTTELL: Tell block. Return the number of block relative to current file.
475 */ 524 */
@@ -546,15 +595,14 @@ tape_3590_read_opposite(struct tape_device *device,
546 * 2. The attention msg is written to the "read subsystem data" buffer. 595 * 2. The attention msg is written to the "read subsystem data" buffer.
547 * In this case we probably should print it to the console. 596 * In this case we probably should print it to the console.
548 */ 597 */
549static int 598static void tape_3590_read_attmsg_async(struct tape_device *device)
550tape_3590_read_attmsg(struct tape_device *device)
551{ 599{
552 struct tape_request *request; 600 struct tape_request *request;
553 char *buf; 601 char *buf;
554 602
555 request = tape_alloc_request(3, 4096); 603 request = tape_alloc_request(3, 4096);
556 if (IS_ERR(request)) 604 if (IS_ERR(request))
557 return PTR_ERR(request); 605 return;
558 request->op = TO_READ_ATTMSG; 606 request->op = TO_READ_ATTMSG;
559 buf = request->cpdata; 607 buf = request->cpdata;
560 buf[0] = PREP_RD_SS_DATA; 608 buf[0] = PREP_RD_SS_DATA;
@@ -562,12 +610,15 @@ tape_3590_read_attmsg(struct tape_device *device)
562 tape_ccw_cc(request->cpaddr, PERFORM_SS_FUNC, 12, buf); 610 tape_ccw_cc(request->cpaddr, PERFORM_SS_FUNC, 12, buf);
563 tape_ccw_cc(request->cpaddr + 1, READ_SS_DATA, 4096 - 12, buf + 12); 611 tape_ccw_cc(request->cpaddr + 1, READ_SS_DATA, 4096 - 12, buf + 12);
564 tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL); 612 tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
565 return tape_do_io_free(device, request); 613 tape_do_io_async_free(device, request);
566} 614}
567 615
568/* 616/*
569 * These functions are used to schedule follow-up actions from within an 617 * These functions are used to schedule follow-up actions from within an
570 * interrupt context (like unsolicited interrupts). 618 * interrupt context (like unsolicited interrupts).
619 * Note: the work handler is called by the system work queue. The tape
620 * commands started by the handler need to be asynchronous, otherwise
621 * a deadlock can occur e.g. in case of a deferred cc=1 (see __tape_do_irq).
571 */ 622 */
572struct work_handler_data { 623struct work_handler_data {
573 struct tape_device *device; 624 struct tape_device *device;
@@ -583,16 +634,16 @@ tape_3590_work_handler(struct work_struct *work)
583 634
584 switch (p->op) { 635 switch (p->op) {
585 case TO_MSEN: 636 case TO_MSEN:
586 tape_3590_sense_medium(p->device); 637 tape_3590_sense_medium_async(p->device);
587 break; 638 break;
588 case TO_READ_ATTMSG: 639 case TO_READ_ATTMSG:
589 tape_3590_read_attmsg(p->device); 640 tape_3590_read_attmsg_async(p->device);
590 break; 641 break;
591 case TO_CRYPT_ON: 642 case TO_CRYPT_ON:
592 tape_3592_enable_crypt(p->device); 643 tape_3592_enable_crypt_async(p->device);
593 break; 644 break;
594 case TO_CRYPT_OFF: 645 case TO_CRYPT_OFF:
595 tape_3592_disable_crypt(p->device); 646 tape_3592_disable_crypt_async(p->device);
596 break; 647 break;
597 default: 648 default:
598 DBF_EVENT(3, "T3590: work handler undefined for " 649 DBF_EVENT(3, "T3590: work handler undefined for "
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9045c52abd25..fb2bb35c62cb 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -443,7 +443,7 @@ static void scsi_run_queue(struct request_queue *q)
443 &sdev->request_queue->queue_flags); 443 &sdev->request_queue->queue_flags);
444 if (flagset) 444 if (flagset)
445 queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue); 445 queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
446 __blk_run_queue(sdev->request_queue); 446 __blk_run_queue(sdev->request_queue, false);
447 if (flagset) 447 if (flagset)
448 queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue); 448 queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
449 spin_unlock(sdev->request_queue->queue_lock); 449 spin_unlock(sdev->request_queue->queue_lock);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 998c01be3234..5c3ccfc6b622 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3829,7 +3829,7 @@ fc_bsg_goose_queue(struct fc_rport *rport)
3829 !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags); 3829 !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
3830 if (flagset) 3830 if (flagset)
3831 queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q); 3831 queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q);
3832 __blk_run_queue(rport->rqst_q); 3832 __blk_run_queue(rport->rqst_q, false);
3833 if (flagset) 3833 if (flagset)
3834 queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q); 3834 queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
3835 spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags); 3835 spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 158cecbec718..4a109835e420 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -282,6 +282,9 @@ int core_tmr_lun_reset(
282 282
283 atomic_set(&task->task_active, 0); 283 atomic_set(&task->task_active, 0);
284 atomic_set(&task->task_stop, 0); 284 atomic_set(&task->task_stop, 0);
285 } else {
286 if (atomic_read(&task->task_execute_queue) != 0)
287 transport_remove_task_from_execute_queue(task, dev);
285 } 288 }
286 __transport_stop_task_timer(task, &flags); 289 __transport_stop_task_timer(task, &flags);
287 290
@@ -301,6 +304,7 @@ int core_tmr_lun_reset(
301 DEBUG_LR("LUN_RESET: got t_transport_active = 1 for" 304 DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
302 " task: %p, t_fe_count: %d dev: %p\n", task, 305 " task: %p, t_fe_count: %d dev: %p\n", task,
303 fe_count, dev); 306 fe_count, dev);
307 atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
304 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, 308 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
305 flags); 309 flags);
306 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); 310 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
@@ -310,6 +314,7 @@ int core_tmr_lun_reset(
310 } 314 }
311 DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p," 315 DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
312 " t_fe_count: %d dev: %p\n", task, fe_count, dev); 316 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
317 atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
313 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); 318 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
314 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); 319 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
315 320
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 236e22d8cfae..4bbf6c147f89 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1207,7 +1207,7 @@ transport_get_task_from_execute_queue(struct se_device *dev)
1207 * 1207 *
1208 * 1208 *
1209 */ 1209 */
1210static void transport_remove_task_from_execute_queue( 1210void transport_remove_task_from_execute_queue(
1211 struct se_task *task, 1211 struct se_task *task,
1212 struct se_device *dev) 1212 struct se_device *dev)
1213{ 1213{
@@ -5549,7 +5549,8 @@ static void transport_generic_wait_for_tasks(
5549 5549
5550 atomic_set(&T_TASK(cmd)->transport_lun_stop, 0); 5550 atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
5551 } 5551 }
5552 if (!atomic_read(&T_TASK(cmd)->t_transport_active)) 5552 if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
5553 atomic_read(&T_TASK(cmd)->t_transport_aborted))
5553 goto remove; 5554 goto remove;
5554 5555
5555 atomic_set(&T_TASK(cmd)->t_transport_stop, 1); 5556 atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
@@ -5956,6 +5957,9 @@ static void transport_processing_shutdown(struct se_device *dev)
5956 5957
5957 atomic_set(&task->task_active, 0); 5958 atomic_set(&task->task_active, 0);
5958 atomic_set(&task->task_stop, 0); 5959 atomic_set(&task->task_stop, 0);
5960 } else {
5961 if (atomic_read(&task->task_execute_queue) != 0)
5962 transport_remove_task_from_execute_queue(task, dev);
5959 } 5963 }
5960 __transport_stop_task_timer(task, &flags); 5964 __transport_stop_task_timer(task, &flags);
5961 5965
diff --git a/drivers/tty/serial/serial_cs.c b/drivers/tty/serial/serial_cs.c
index 93760b2ea172..1ef4df9bf7e4 100644
--- a/drivers/tty/serial/serial_cs.c
+++ b/drivers/tty/serial/serial_cs.c
@@ -712,6 +712,7 @@ static struct pcmcia_device_id serial_ids[] = {
712 PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf), 712 PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf),
713 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0e01), 713 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0e01),
714 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05), 714 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05),
715 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0b05),
715 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101), 716 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101),
716 PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070), 717 PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070),
717 PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0101, 0x0562), 718 PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0101, 0x0562),
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
index 3c6e1a058745..5e1495097ec3 100644
--- a/drivers/usb/gadget/f_phonet.c
+++ b/drivers/usb/gadget/f_phonet.c
@@ -346,14 +346,19 @@ static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req)
346 346
347 if (unlikely(!skb)) 347 if (unlikely(!skb))
348 break; 348 break;
349 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0,
350 req->actual);
351 page = NULL;
352 349
353 if (req->actual < req->length) { /* Last fragment */ 350 if (skb->len == 0) { /* First fragment */
354 skb->protocol = htons(ETH_P_PHONET); 351 skb->protocol = htons(ETH_P_PHONET);
355 skb_reset_mac_header(skb); 352 skb_reset_mac_header(skb);
356 pskb_pull(skb, 1); 353 /* Can't use pskb_pull() on page in IRQ */
354 memcpy(skb_put(skb, 1), page_address(page), 1);
355 }
356
357 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
358 skb->len == 0, req->actual);
359 page = NULL;
360
361 if (req->actual < req->length) { /* Last fragment */
357 skb->dev = dev; 362 skb->dev = dev;
358 dev->stats.rx_packets++; 363 dev->stats.rx_packets++;
359 dev->stats.rx_bytes += skb->len; 364 dev->stats.rx_bytes += skb->len;
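
The f_phonet rework treats the first fragment specially: the one-byte Phonet media header is copied into the skb head with memcpy() because pskb_pull() cannot be used on a page in IRQ context, and skb_add_rx_frag() then attaches the page past that byte. Below is a loose userspace analogue of the first-fragment handling only, with hypothetical names; the real code keeps the payload in page fragments rather than copying it.

#include <stddef.h>
#include <string.h>

struct packet {
	unsigned char media;            /* one-byte header from the first fragment */
	unsigned char data[65536];
	size_t len;                     /* payload bytes assembled so far */
};

static void add_fragment(struct packet *pkt, const unsigned char *frag, size_t n)
{
	size_t skip = 0;

	if (pkt->len == 0 && n > 0) {   /* first fragment carries the header byte */
		pkt->media = frag[0];
		skip = 1;
	}
	memcpy(pkt->data + pkt->len, frag + skip, n - skip);
	pkt->len += n - skip;
}
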
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index e8f4f36fdf0b..a6f21b891f68 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -29,6 +29,7 @@
29 29
30#include <linux/of.h> 30#include <linux/of.h>
31#include <linux/of_platform.h> 31#include <linux/of_platform.h>
32#include <linux/of_address.h>
32 33
33/** 34/**
34 * ehci_xilinx_of_setup - Initialize the device for ehci_reset() 35 * ehci_xilinx_of_setup - Initialize the device for ehci_reset()
diff --git a/drivers/video/backlight/ltv350qv.c b/drivers/video/backlight/ltv350qv.c
index 8010aaeb5adb..dd0e84a9bd2f 100644
--- a/drivers/video/backlight/ltv350qv.c
+++ b/drivers/video/backlight/ltv350qv.c
@@ -239,11 +239,15 @@ static int __devinit ltv350qv_probe(struct spi_device *spi)
239 lcd->spi = spi; 239 lcd->spi = spi;
240 lcd->power = FB_BLANK_POWERDOWN; 240 lcd->power = FB_BLANK_POWERDOWN;
241 lcd->buffer = kzalloc(8, GFP_KERNEL); 241 lcd->buffer = kzalloc(8, GFP_KERNEL);
242 if (!lcd->buffer) {
243 ret = -ENOMEM;
244 goto out_free_lcd;
245 }
242 246
243 ld = lcd_device_register("ltv350qv", &spi->dev, lcd, &ltv_ops); 247 ld = lcd_device_register("ltv350qv", &spi->dev, lcd, &ltv_ops);
244 if (IS_ERR(ld)) { 248 if (IS_ERR(ld)) {
245 ret = PTR_ERR(ld); 249 ret = PTR_ERR(ld);
246 goto out_free_lcd; 250 goto out_free_buffer;
247 } 251 }
248 lcd->ld = ld; 252 lcd->ld = ld;
249 253
@@ -257,6 +261,8 @@ static int __devinit ltv350qv_probe(struct spi_device *spi)
257 261
258out_unregister: 262out_unregister:
259 lcd_device_unregister(ld); 263 lcd_device_unregister(ld);
264out_free_buffer:
265 kfree(lcd->buffer);
260out_free_lcd: 266out_free_lcd:
261 kfree(lcd); 267 kfree(lcd);
262 return ret; 268 return ret;
@@ -268,6 +274,7 @@ static int __devexit ltv350qv_remove(struct spi_device *spi)
268 274
269 ltv350qv_power(lcd, FB_BLANK_POWERDOWN); 275 ltv350qv_power(lcd, FB_BLANK_POWERDOWN);
270 lcd_device_unregister(lcd->ld); 276 lcd_device_unregister(lcd->ld);
277 kfree(lcd->buffer);
271 kfree(lcd); 278 kfree(lcd);
272 279
273 return 0; 280 return 0;
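
The ltv350qv hunks add the missing check on the 8-byte buffer allocation and a matching unwind label, so every failure exits through the cleanup ladder in reverse order of acquisition. A minimal userspace sketch of the same pattern, with hypothetical names standing in for the driver's objects:

#include <stdlib.h>

struct lcd_state {                      /* stand-in for the driver's private data */
	char *buffer;
};

static int probe_like(struct lcd_state **out)
{
	int ret;
	struct lcd_state *lcd = calloc(1, sizeof(*lcd));

	if (!lcd)
		return -1;

	lcd->buffer = calloc(8, 1);
	if (!lcd->buffer) {
		ret = -1;
		goto out_free_lcd;      /* nothing else to undo yet */
	}

	/* ... acquire further resources; on failure jump to the label
	 * that undoes everything allocated so far ... */

	*out = lcd;
	return 0;

out_free_lcd:
	free(lcd);
	return ret;
}
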
diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c
index eca855a55c0d..3de4ba0260a5 100644
--- a/drivers/watchdog/cpwd.c
+++ b/drivers/watchdog/cpwd.c
@@ -646,7 +646,7 @@ static int __devexit cpwd_remove(struct platform_device *op)
646 struct cpwd *p = dev_get_drvdata(&op->dev); 646 struct cpwd *p = dev_get_drvdata(&op->dev);
647 int i; 647 int i;
648 648
649 for (i = 0; i < 4; i++) { 649 for (i = 0; i < WD_NUMDEVS; i++) {
650 misc_deregister(&p->devs[i].misc); 650 misc_deregister(&p->devs[i].misc);
651 651
652 if (!p->enabled) { 652 if (!p->enabled) {
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 24b966d5061a..204a5603c4ae 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -710,7 +710,7 @@ static int __devinit hpwdt_init_nmi_decoding(struct pci_dev *dev)
710 return 0; 710 return 0;
711} 711}
712 712
713static void __devexit hpwdt_exit_nmi_decoding(void) 713static void hpwdt_exit_nmi_decoding(void)
714{ 714{
715 unregister_die_notifier(&die_notifier); 715 unregister_die_notifier(&die_notifier);
716 if (cru_rom_addr) 716 if (cru_rom_addr)
@@ -726,7 +726,7 @@ static int __devinit hpwdt_init_nmi_decoding(struct pci_dev *dev)
726 return 0; 726 return 0;
727} 727}
728 728
729static void __devexit hpwdt_exit_nmi_decoding(void) 729static void hpwdt_exit_nmi_decoding(void)
730{ 730{
731} 731}
732#endif /* CONFIG_HPWDT_NMI_DECODING */ 732#endif /* CONFIG_HPWDT_NMI_DECODING */
diff --git a/drivers/watchdog/sbc_fitpc2_wdt.c b/drivers/watchdog/sbc_fitpc2_wdt.c
index c7d67e9a7465..79906255eeb6 100644
--- a/drivers/watchdog/sbc_fitpc2_wdt.c
+++ b/drivers/watchdog/sbc_fitpc2_wdt.c
@@ -201,11 +201,14 @@ static struct miscdevice fitpc2_wdt_miscdev = {
201static int __init fitpc2_wdt_init(void) 201static int __init fitpc2_wdt_init(void)
202{ 202{
203 int err; 203 int err;
204 const char *brd_name;
204 205
205 if (!strstr(dmi_get_system_info(DMI_BOARD_NAME), "SBC-FITPC2")) 206 brd_name = dmi_get_system_info(DMI_BOARD_NAME);
207
208 if (!brd_name || !strstr(brd_name, "SBC-FITPC2"))
206 return -ENODEV; 209 return -ENODEV;
207 210
208 pr_info("%s found\n", dmi_get_system_info(DMI_BOARD_NAME)); 211 pr_info("%s found\n", brd_name);
209 212
210 if (!request_region(COMMAND_PORT, 1, WATCHDOG_NAME)) { 213 if (!request_region(COMMAND_PORT, 1, WATCHDOG_NAME)) {
211 pr_err("I/O address 0x%04x already in use\n", COMMAND_PORT); 214 pr_err("I/O address 0x%04x already in use\n", COMMAND_PORT);
diff --git a/drivers/watchdog/sch311x_wdt.c b/drivers/watchdog/sch311x_wdt.c
index 0461858e07d0..b61ab1c54293 100644
--- a/drivers/watchdog/sch311x_wdt.c
+++ b/drivers/watchdog/sch311x_wdt.c
@@ -508,7 +508,7 @@ static int __init sch311x_detect(int sio_config_port, unsigned short *addr)
508 sch311x_sio_outb(sio_config_port, 0x07, 0x0a); 508 sch311x_sio_outb(sio_config_port, 0x07, 0x0a);
509 509
510 /* Check if Logical Device Register is currently active */ 510 /* Check if Logical Device Register is currently active */
511 if (sch311x_sio_inb(sio_config_port, 0x30) && 0x01 == 0) 511 if ((sch311x_sio_inb(sio_config_port, 0x30) & 0x01) == 0)
512 printk(KERN_INFO PFX "Seems that LDN 0x0a is not active...\n"); 512 printk(KERN_INFO PFX "Seems that LDN 0x0a is not active...\n");
513 513
514 /* Get the base address of the runtime registers */ 514 /* Get the base address of the runtime registers */
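
The sch311x_wdt hunk fixes a condition that could never fire: == binds tighter than &&, so "inb(...) && 0x01 == 0" parses as "inb(...) && (0x01 == 0)", which is always false and silences the warning. A minimal standalone sketch of the broken and the intended test (the register value is made up for illustration):

#include <stdio.h>

int main(void)
{
	unsigned int reg = 0x00;        /* LDN inactive: bit 0 clear */

	if (reg && 0x01 == 0)           /* parses as reg && (0x01 == 0): always false */
		printf("never reached\n");

	if ((reg & 0x01) == 0)          /* intended test on bit 0 */
		printf("LDN 0x0a is not active\n");

	return 0;
}
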
diff --git a/drivers/watchdog/w83697ug_wdt.c b/drivers/watchdog/w83697ug_wdt.c
index a6c12dec91a1..df2a64dc9672 100644
--- a/drivers/watchdog/w83697ug_wdt.c
+++ b/drivers/watchdog/w83697ug_wdt.c
@@ -109,7 +109,7 @@ static int w83697ug_select_wd_register(void)
109 outb_p(0x08, WDT_EFDR); /* select logical device 8 (GPIO2) */ 109 outb_p(0x08, WDT_EFDR); /* select logical device 8 (GPIO2) */
110 outb_p(0x30, WDT_EFER); /* select CR30 */ 110 outb_p(0x30, WDT_EFER); /* select CR30 */
111 c = inb_p(WDT_EFDR); 111 c = inb_p(WDT_EFDR);
112 outb_p(c || 0x01, WDT_EFDR); /* set bit 0 to activate GPIO2 */ 112 outb_p(c | 0x01, WDT_EFDR); /* set bit 0 to activate GPIO2 */
113 113
114 return 0; 114 return 0;
115} 115}
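
The w83697ug_wdt hunk is the companion operator fix: the logical OR "c || 0x01" collapses to 0 or 1 and discards every other bit read back from CR30, while the bitwise OR merely sets bit 0 and keeps the rest. A short demonstration with an arbitrary register value:

#include <stdio.h>

int main(void)
{
	unsigned char c = 0x8c;                     /* value read back from CR30 */

	printf("c || 0x01 = 0x%02x\n", c || 0x01);  /* 0x01: logical OR yields 0 or 1 */
	printf("c |  0x01 = 0x%02x\n", c | 0x01);   /* 0x8d: bit 0 set, other bits kept */

	return 0;
}
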
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 6f820fa23df4..7f78cc78fdd0 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -729,6 +729,15 @@ struct btrfs_space_info {
729 u64 disk_total; /* total bytes on disk, takes mirrors into 729 u64 disk_total; /* total bytes on disk, takes mirrors into
730 account */ 730 account */
731 731
732 /*
733 * we bump reservation progress every time we decrement
734 * bytes_reserved. This way people waiting for reservations
735 * know something good has happened and they can check
736 * for progress. The number here isn't to be trusted, it
737 * just shows reclaim activity
738 */
739 unsigned long reservation_progress;
740
732 int full; /* indicates that we cannot allocate any more 741 int full; /* indicates that we cannot allocate any more
733 chunks for this space */ 742 chunks for this space */
734 int force_alloc; /* set if we need to force a chunk alloc for 743 int force_alloc; /* set if we need to force a chunk alloc for
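
The new reservation_progress field is only a heartbeat: as the comment above says, waiters should not trust the value itself, only whether it has changed since they sampled it. A rough userspace analogue of that scheme, assuming a single shared counter and hypothetical helper names:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_ulong reservation_progress;

/* called wherever the reserved byte count shrinks */
static void release_bytes(void)
{
	atomic_fetch_add(&reservation_progress, 1);
}

/* any change since the snapshot means someone released a reservation */
static bool made_progress_since(unsigned long snapshot)
{
	return atomic_load(&reservation_progress) != snapshot;
}

/* waiter, mirroring shrink_delalloc():
 *	unsigned long snap = atomic_load(&reservation_progress);
 *	... kick writeback, sleep briefly ...
 *	if (loops > 3 && !made_progress_since(snap))
 *		stop looping and let the caller commit instead;
 */
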
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 588ff9849873..7b3089b5c2df 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3342,15 +3342,16 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
3342 u64 max_reclaim; 3342 u64 max_reclaim;
3343 u64 reclaimed = 0; 3343 u64 reclaimed = 0;
3344 long time_left; 3344 long time_left;
3345 int pause = 1;
3346 int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT; 3345 int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
3347 int loops = 0; 3346 int loops = 0;
3347 unsigned long progress;
3348 3348
3349 block_rsv = &root->fs_info->delalloc_block_rsv; 3349 block_rsv = &root->fs_info->delalloc_block_rsv;
3350 space_info = block_rsv->space_info; 3350 space_info = block_rsv->space_info;
3351 3351
3352 smp_mb(); 3352 smp_mb();
3353 reserved = space_info->bytes_reserved; 3353 reserved = space_info->bytes_reserved;
3354 progress = space_info->reservation_progress;
3354 3355
3355 if (reserved == 0) 3356 if (reserved == 0)
3356 return 0; 3357 return 0;
@@ -3365,31 +3366,36 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
3365 writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages); 3366 writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages);
3366 3367
3367 spin_lock(&space_info->lock); 3368 spin_lock(&space_info->lock);
3368 if (reserved > space_info->bytes_reserved) { 3369 if (reserved > space_info->bytes_reserved)
3369 loops = 0;
3370 reclaimed += reserved - space_info->bytes_reserved; 3370 reclaimed += reserved - space_info->bytes_reserved;
3371 } else {
3372 loops++;
3373 }
3374 reserved = space_info->bytes_reserved; 3371 reserved = space_info->bytes_reserved;
3375 spin_unlock(&space_info->lock); 3372 spin_unlock(&space_info->lock);
3376 3373
3374 loops++;
3375
3377 if (reserved == 0 || reclaimed >= max_reclaim) 3376 if (reserved == 0 || reclaimed >= max_reclaim)
3378 break; 3377 break;
3379 3378
3380 if (trans && trans->transaction->blocked) 3379 if (trans && trans->transaction->blocked)
3381 return -EAGAIN; 3380 return -EAGAIN;
3382 3381
3383 __set_current_state(TASK_INTERRUPTIBLE); 3382 time_left = schedule_timeout_interruptible(1);
3384 time_left = schedule_timeout(pause);
3385 3383
3386 /* We were interrupted, exit */ 3384 /* We were interrupted, exit */
3387 if (time_left) 3385 if (time_left)
3388 break; 3386 break;
3389 3387
3390 pause <<= 1; 3388 /* we've kicked the IO a few times, if anything has been freed,
3391 if (pause > HZ / 10) 3389 * exit. There is no sense in looping here for a long time
3392 pause = HZ / 10; 3390 * when we really need to commit the transaction, or there are
3391 * just too many writers without enough free space
3392 */
3393
3394 if (loops > 3) {
3395 smp_mb();
3396 if (progress != space_info->reservation_progress)
3397 break;
3398 }
3393 3399
3394 } 3400 }
3395 return reclaimed >= to_reclaim; 3401 return reclaimed >= to_reclaim;
@@ -3612,6 +3618,7 @@ void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
3612 if (num_bytes) { 3618 if (num_bytes) {
3613 spin_lock(&space_info->lock); 3619 spin_lock(&space_info->lock);
3614 space_info->bytes_reserved -= num_bytes; 3620 space_info->bytes_reserved -= num_bytes;
3621 space_info->reservation_progress++;
3615 spin_unlock(&space_info->lock); 3622 spin_unlock(&space_info->lock);
3616 } 3623 }
3617 } 3624 }
@@ -3844,6 +3851,7 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
3844 if (block_rsv->reserved >= block_rsv->size) { 3851 if (block_rsv->reserved >= block_rsv->size) {
3845 num_bytes = block_rsv->reserved - block_rsv->size; 3852 num_bytes = block_rsv->reserved - block_rsv->size;
3846 sinfo->bytes_reserved -= num_bytes; 3853 sinfo->bytes_reserved -= num_bytes;
3854 sinfo->reservation_progress++;
3847 block_rsv->reserved = block_rsv->size; 3855 block_rsv->reserved = block_rsv->size;
3848 block_rsv->full = 1; 3856 block_rsv->full = 1;
3849 } 3857 }
@@ -4005,7 +4013,6 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4005 to_reserve = 0; 4013 to_reserve = 0;
4006 } 4014 }
4007 spin_unlock(&BTRFS_I(inode)->accounting_lock); 4015 spin_unlock(&BTRFS_I(inode)->accounting_lock);
4008
4009 to_reserve += calc_csum_metadata_size(inode, num_bytes); 4016 to_reserve += calc_csum_metadata_size(inode, num_bytes);
4010 ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1); 4017 ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1);
4011 if (ret) 4018 if (ret)
@@ -4133,6 +4140,7 @@ static int update_block_group(struct btrfs_trans_handle *trans,
4133 btrfs_set_block_group_used(&cache->item, old_val); 4140 btrfs_set_block_group_used(&cache->item, old_val);
4134 cache->reserved -= num_bytes; 4141 cache->reserved -= num_bytes;
4135 cache->space_info->bytes_reserved -= num_bytes; 4142 cache->space_info->bytes_reserved -= num_bytes;
4143 cache->space_info->reservation_progress++;
4136 cache->space_info->bytes_used += num_bytes; 4144 cache->space_info->bytes_used += num_bytes;
4137 cache->space_info->disk_used += num_bytes * factor; 4145 cache->space_info->disk_used += num_bytes * factor;
4138 spin_unlock(&cache->lock); 4146 spin_unlock(&cache->lock);
@@ -4184,6 +4192,7 @@ static int pin_down_extent(struct btrfs_root *root,
4184 if (reserved) { 4192 if (reserved) {
4185 cache->reserved -= num_bytes; 4193 cache->reserved -= num_bytes;
4186 cache->space_info->bytes_reserved -= num_bytes; 4194 cache->space_info->bytes_reserved -= num_bytes;
4195 cache->space_info->reservation_progress++;
4187 } 4196 }
4188 spin_unlock(&cache->lock); 4197 spin_unlock(&cache->lock);
4189 spin_unlock(&cache->space_info->lock); 4198 spin_unlock(&cache->space_info->lock);
@@ -4234,6 +4243,7 @@ static int update_reserved_bytes(struct btrfs_block_group_cache *cache,
4234 space_info->bytes_readonly += num_bytes; 4243 space_info->bytes_readonly += num_bytes;
4235 cache->reserved -= num_bytes; 4244 cache->reserved -= num_bytes;
4236 space_info->bytes_reserved -= num_bytes; 4245 space_info->bytes_reserved -= num_bytes;
4246 space_info->reservation_progress++;
4237 } 4247 }
4238 spin_unlock(&cache->lock); 4248 spin_unlock(&cache->lock);
4239 spin_unlock(&space_info->lock); 4249 spin_unlock(&space_info->lock);
@@ -4712,6 +4722,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
4712 if (ret) { 4722 if (ret) {
4713 spin_lock(&cache->space_info->lock); 4723 spin_lock(&cache->space_info->lock);
4714 cache->space_info->bytes_reserved -= buf->len; 4724 cache->space_info->bytes_reserved -= buf->len;
4725 cache->space_info->reservation_progress++;
4715 spin_unlock(&cache->space_info->lock); 4726 spin_unlock(&cache->space_info->lock);
4716 } 4727 }
4717 goto out; 4728 goto out;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index fd3f172e94e6..714adc4ac4c2 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3046,17 +3046,38 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3046 } 3046 }
3047 3047
3048 while (!end) { 3048 while (!end) {
3049 off = extent_map_end(em); 3049 u64 offset_in_extent;
3050 if (off >= max) 3050
3051 end = 1; 3051 /* break if the extent we found is outside the range */
3052 if (em->start >= max || extent_map_end(em) < off)
3053 break;
3054
3055 /*
3056 * get_extent may return an extent that starts before our
3057 * requested range. We have to make sure the ranges
3058 * we return to fiemap always move forward and don't
3059 * overlap, so adjust the offsets here
3060 */
3061 em_start = max(em->start, off);
3052 3062
3053 em_start = em->start; 3063 /*
3054 em_len = em->len; 3064 * record the offset from the start of the extent
3065 * for adjusting the disk offset below
3066 */
3067 offset_in_extent = em_start - em->start;
3055 em_end = extent_map_end(em); 3068 em_end = extent_map_end(em);
3069 em_len = em_end - em_start;
3056 emflags = em->flags; 3070 emflags = em->flags;
3057 disko = 0; 3071 disko = 0;
3058 flags = 0; 3072 flags = 0;
3059 3073
3074 /*
3075 * bump off for our next call to get_extent
3076 */
3077 off = extent_map_end(em);
3078 if (off >= max)
3079 end = 1;
3080
3060 if (em->block_start == EXTENT_MAP_LAST_BYTE) { 3081 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
3061 end = 1; 3082 end = 1;
3062 flags |= FIEMAP_EXTENT_LAST; 3083 flags |= FIEMAP_EXTENT_LAST;
@@ -3067,7 +3088,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3067 flags |= (FIEMAP_EXTENT_DELALLOC | 3088 flags |= (FIEMAP_EXTENT_DELALLOC |
3068 FIEMAP_EXTENT_UNKNOWN); 3089 FIEMAP_EXTENT_UNKNOWN);
3069 } else { 3090 } else {
3070 disko = em->block_start; 3091 disko = em->block_start + offset_in_extent;
3071 } 3092 }
3072 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) 3093 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
3073 flags |= FIEMAP_EXTENT_ENCODED; 3094 flags |= FIEMAP_EXTENT_ENCODED;
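
The extent_io.c change trims the extent returned by get_extent so the ranges handed back to fiemap always start at or after the caller's offset and never overlap, shifting the physical address by the same amount. A small standalone sketch of the arithmetic with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned long long em_start_raw = 4096, em_len_raw = 16384; /* cached extent */
	unsigned long long block_start  = 1048576;                  /* its disk offset */
	unsigned long long off          = 8192;                     /* caller's position */

	unsigned long long em_start = em_start_raw > off ? em_start_raw : off;
	unsigned long long offset_in_extent = em_start - em_start_raw;
	unsigned long long em_end   = em_start_raw + em_len_raw;
	unsigned long long em_len   = em_end - em_start;
	unsigned long long disko    = block_start + offset_in_extent;

	/* prints: logical 8192 len 12288 physical 1052672 */
	printf("logical %llu len %llu physical %llu\n", em_start, em_len, disko);
	return 0;
}
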
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 7084140d5940..f447b783bb84 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -70,6 +70,19 @@ static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
70 70
71 /* Flush processor's dcache for this page */ 71 /* Flush processor's dcache for this page */
72 flush_dcache_page(page); 72 flush_dcache_page(page);
73
74 /*
75 * if we get a partial write, we can end up with
76 * partially up to date pages. These add
77 * a lot of complexity, so make sure they don't
78 * happen by forcing this copy to be retried.
79 *
80 * The rest of the btrfs_file_write code will fall
81 * back to page at a time copies after we return 0.
82 */
83 if (!PageUptodate(page) && copied < count)
84 copied = 0;
85
73 iov_iter_advance(i, copied); 86 iov_iter_advance(i, copied);
74 write_bytes -= copied; 87 write_bytes -= copied;
75 total_copied += copied; 88 total_copied += copied;
@@ -763,6 +776,27 @@ out:
763} 776}
764 777
765/* 778/*
779 * on error we return an unlocked page and the error value
780 * on success we return a locked page and 0
781 */
782static int prepare_uptodate_page(struct page *page, u64 pos)
783{
784 int ret = 0;
785
786 if ((pos & (PAGE_CACHE_SIZE - 1)) && !PageUptodate(page)) {
787 ret = btrfs_readpage(NULL, page);
788 if (ret)
789 return ret;
790 lock_page(page);
791 if (!PageUptodate(page)) {
792 unlock_page(page);
793 return -EIO;
794 }
795 }
796 return 0;
797}
798
799/*
766 * this gets pages into the page cache and locks them down, it also properly 800 * this gets pages into the page cache and locks them down, it also properly
767 * waits for data=ordered extents to finish before allowing the pages to be 801 * waits for data=ordered extents to finish before allowing the pages to be
768 * modified. 802 * modified.
@@ -777,6 +811,7 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
777 unsigned long index = pos >> PAGE_CACHE_SHIFT; 811 unsigned long index = pos >> PAGE_CACHE_SHIFT;
778 struct inode *inode = fdentry(file)->d_inode; 812 struct inode *inode = fdentry(file)->d_inode;
779 int err = 0; 813 int err = 0;
814 int faili = 0;
780 u64 start_pos; 815 u64 start_pos;
781 u64 last_pos; 816 u64 last_pos;
782 817
@@ -794,15 +829,24 @@ again:
794 for (i = 0; i < num_pages; i++) { 829 for (i = 0; i < num_pages; i++) {
795 pages[i] = grab_cache_page(inode->i_mapping, index + i); 830 pages[i] = grab_cache_page(inode->i_mapping, index + i);
796 if (!pages[i]) { 831 if (!pages[i]) {
797 int c; 832 faili = i - 1;
798 for (c = i - 1; c >= 0; c--) { 833 err = -ENOMEM;
799 unlock_page(pages[c]); 834 goto fail;
800 page_cache_release(pages[c]); 835 }
801 } 836
802 return -ENOMEM; 837 if (i == 0)
838 err = prepare_uptodate_page(pages[i], pos);
839 if (i == num_pages - 1)
840 err = prepare_uptodate_page(pages[i],
841 pos + write_bytes);
842 if (err) {
843 page_cache_release(pages[i]);
844 faili = i - 1;
845 goto fail;
803 } 846 }
804 wait_on_page_writeback(pages[i]); 847 wait_on_page_writeback(pages[i]);
805 } 848 }
849 err = 0;
806 if (start_pos < inode->i_size) { 850 if (start_pos < inode->i_size) {
807 struct btrfs_ordered_extent *ordered; 851 struct btrfs_ordered_extent *ordered;
808 lock_extent_bits(&BTRFS_I(inode)->io_tree, 852 lock_extent_bits(&BTRFS_I(inode)->io_tree,
@@ -842,6 +886,14 @@ again:
842 WARN_ON(!PageLocked(pages[i])); 886 WARN_ON(!PageLocked(pages[i]));
843 } 887 }
844 return 0; 888 return 0;
889fail:
890 while (faili >= 0) {
891 unlock_page(pages[faili]);
892 page_cache_release(pages[faili]);
893 faili--;
894 }
895 return err;
896
845} 897}
846 898
847static ssize_t btrfs_file_aio_write(struct kiocb *iocb, 899static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
@@ -851,7 +903,6 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
851 struct file *file = iocb->ki_filp; 903 struct file *file = iocb->ki_filp;
852 struct inode *inode = fdentry(file)->d_inode; 904 struct inode *inode = fdentry(file)->d_inode;
853 struct btrfs_root *root = BTRFS_I(inode)->root; 905 struct btrfs_root *root = BTRFS_I(inode)->root;
854 struct page *pinned[2];
855 struct page **pages = NULL; 906 struct page **pages = NULL;
856 struct iov_iter i; 907 struct iov_iter i;
857 loff_t *ppos = &iocb->ki_pos; 908 loff_t *ppos = &iocb->ki_pos;
@@ -872,9 +923,6 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
872 will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) || 923 will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) ||
873 (file->f_flags & O_DIRECT)); 924 (file->f_flags & O_DIRECT));
874 925
875 pinned[0] = NULL;
876 pinned[1] = NULL;
877
878 start_pos = pos; 926 start_pos = pos;
879 927
880 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); 928 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
@@ -962,32 +1010,6 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
962 first_index = pos >> PAGE_CACHE_SHIFT; 1010 first_index = pos >> PAGE_CACHE_SHIFT;
963 last_index = (pos + iov_iter_count(&i)) >> PAGE_CACHE_SHIFT; 1011 last_index = (pos + iov_iter_count(&i)) >> PAGE_CACHE_SHIFT;
964 1012
965 /*
966 * there are lots of better ways to do this, but this code
967 * makes sure the first and last page in the file range are
968 * up to date and ready for cow
969 */
970 if ((pos & (PAGE_CACHE_SIZE - 1))) {
971 pinned[0] = grab_cache_page(inode->i_mapping, first_index);
972 if (!PageUptodate(pinned[0])) {
973 ret = btrfs_readpage(NULL, pinned[0]);
974 BUG_ON(ret);
975 wait_on_page_locked(pinned[0]);
976 } else {
977 unlock_page(pinned[0]);
978 }
979 }
980 if ((pos + iov_iter_count(&i)) & (PAGE_CACHE_SIZE - 1)) {
981 pinned[1] = grab_cache_page(inode->i_mapping, last_index);
982 if (!PageUptodate(pinned[1])) {
983 ret = btrfs_readpage(NULL, pinned[1]);
984 BUG_ON(ret);
985 wait_on_page_locked(pinned[1]);
986 } else {
987 unlock_page(pinned[1]);
988 }
989 }
990
991 while (iov_iter_count(&i) > 0) { 1013 while (iov_iter_count(&i) > 0) {
992 size_t offset = pos & (PAGE_CACHE_SIZE - 1); 1014 size_t offset = pos & (PAGE_CACHE_SIZE - 1);
993 size_t write_bytes = min(iov_iter_count(&i), 1015 size_t write_bytes = min(iov_iter_count(&i),
@@ -1024,8 +1046,20 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
1024 1046
1025 copied = btrfs_copy_from_user(pos, num_pages, 1047 copied = btrfs_copy_from_user(pos, num_pages,
1026 write_bytes, pages, &i); 1048 write_bytes, pages, &i);
1027 dirty_pages = (copied + offset + PAGE_CACHE_SIZE - 1) >> 1049
1028 PAGE_CACHE_SHIFT; 1050 /*
1051 * if we have trouble faulting in the pages, fall
1052 * back to one page at a time
1053 */
1054 if (copied < write_bytes)
1055 nrptrs = 1;
1056
1057 if (copied == 0)
1058 dirty_pages = 0;
1059 else
1060 dirty_pages = (copied + offset +
1061 PAGE_CACHE_SIZE - 1) >>
1062 PAGE_CACHE_SHIFT;
1029 1063
1030 if (num_pages > dirty_pages) { 1064 if (num_pages > dirty_pages) {
1031 if (copied > 0) 1065 if (copied > 0)
@@ -1069,10 +1103,6 @@ out:
1069 err = ret; 1103 err = ret;
1070 1104
1071 kfree(pages); 1105 kfree(pages);
1072 if (pinned[0])
1073 page_cache_release(pinned[0]);
1074 if (pinned[1])
1075 page_cache_release(pinned[1]);
1076 *ppos = pos; 1106 *ppos = pos;
1077 1107
1078 /* 1108 /*
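
The file.c changes drop the old pinned-page workaround in favour of prepare_uptodate_page() and treat a zero-byte copy as dirtying no pages, since the rounding formula alone still reports one dirty page whenever the in-page offset is non-zero. A tiny sketch of that rounding, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static unsigned long dirty_pages(unsigned long copied, unsigned long offset)
{
	if (copied == 0)
		return 0;               /* the formula alone would still yield 1 */
	return (copied + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int main(void)
{
	printf("%lu\n", dirty_pages(0, 100));   /* 0: nothing was copied */
	printf("%lu\n", dirty_pages(1, 4095));  /* 1: one page touched */
	printf("%lu\n", dirty_pages(10, 4090)); /* 2: the copy crosses a page boundary */
	return 0;
}
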
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 0efdb65953c5..9007bbd01dbf 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4821,10 +4821,11 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4821 goto fail; 4821 goto fail;
4822 4822
4823 /* 4823 /*
4824 * 1 item for inode ref 4824 * 2 items for inode and inode ref
4825 * 2 items for dir items 4825 * 2 items for dir items
4826 * 1 item for parent inode
4826 */ 4827 */
4827 trans = btrfs_start_transaction(root, 3); 4828 trans = btrfs_start_transaction(root, 5);
4828 if (IS_ERR(trans)) { 4829 if (IS_ERR(trans)) {
4829 err = PTR_ERR(trans); 4830 err = PTR_ERR(trans);
4830 goto fail; 4831 goto fail;
@@ -6056,6 +6057,7 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
6056 if (!skip_sum) { 6057 if (!skip_sum) {
6057 dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS); 6058 dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS);
6058 if (!dip->csums) { 6059 if (!dip->csums) {
6060 kfree(dip);
6059 ret = -ENOMEM; 6061 ret = -ENOMEM;
6060 goto free_ordered; 6062 goto free_ordered;
6061 } 6063 }
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index f0aef787a102..ebafa65a29b6 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -60,7 +60,6 @@ int ceph_init_dentry(struct dentry *dentry)
60 } 60 }
61 di->dentry = dentry; 61 di->dentry = dentry;
62 di->lease_session = NULL; 62 di->lease_session = NULL;
63 di->parent_inode = igrab(dentry->d_parent->d_inode);
64 dentry->d_fsdata = di; 63 dentry->d_fsdata = di;
65 dentry->d_time = jiffies; 64 dentry->d_time = jiffies;
66 ceph_dentry_lru_add(dentry); 65 ceph_dentry_lru_add(dentry);
@@ -410,7 +409,7 @@ more:
410 spin_lock(&inode->i_lock); 409 spin_lock(&inode->i_lock);
411 if (ci->i_release_count == fi->dir_release_count) { 410 if (ci->i_release_count == fi->dir_release_count) {
412 dout(" marking %p complete\n", inode); 411 dout(" marking %p complete\n", inode);
413 ci->i_ceph_flags |= CEPH_I_COMPLETE; 412 /* ci->i_ceph_flags |= CEPH_I_COMPLETE; */
414 ci->i_max_offset = filp->f_pos; 413 ci->i_max_offset = filp->f_pos;
415 } 414 }
416 spin_unlock(&inode->i_lock); 415 spin_unlock(&inode->i_lock);
@@ -497,6 +496,7 @@ struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
497 496
498 /* .snap dir? */ 497 /* .snap dir? */
499 if (err == -ENOENT && 498 if (err == -ENOENT &&
499 ceph_snap(parent) == CEPH_NOSNAP &&
500 strcmp(dentry->d_name.name, 500 strcmp(dentry->d_name.name,
501 fsc->mount_options->snapdir_name) == 0) { 501 fsc->mount_options->snapdir_name) == 0) {
502 struct inode *inode = ceph_get_snapdir(parent); 502 struct inode *inode = ceph_get_snapdir(parent);
@@ -993,7 +993,7 @@ static int ceph_d_revalidate(struct dentry *dentry, struct nameidata *nd)
993{ 993{
994 struct inode *dir; 994 struct inode *dir;
995 995
996 if (nd->flags & LOOKUP_RCU) 996 if (nd && nd->flags & LOOKUP_RCU)
997 return -ECHILD; 997 return -ECHILD;
998 998
999 dir = dentry->d_parent->d_inode; 999 dir = dentry->d_parent->d_inode;
@@ -1030,28 +1030,8 @@ out_touch:
1030static void ceph_dentry_release(struct dentry *dentry) 1030static void ceph_dentry_release(struct dentry *dentry)
1031{ 1031{
1032 struct ceph_dentry_info *di = ceph_dentry(dentry); 1032 struct ceph_dentry_info *di = ceph_dentry(dentry);
1033 struct inode *parent_inode = NULL;
1034 u64 snapid = CEPH_NOSNAP;
1035 1033
1036 if (!IS_ROOT(dentry)) { 1034 dout("dentry_release %p\n", dentry);
1037 parent_inode = di->parent_inode;
1038 if (parent_inode)
1039 snapid = ceph_snap(parent_inode);
1040 }
1041 dout("dentry_release %p parent %p\n", dentry, parent_inode);
1042 if (parent_inode && snapid != CEPH_SNAPDIR) {
1043 struct ceph_inode_info *ci = ceph_inode(parent_inode);
1044
1045 spin_lock(&parent_inode->i_lock);
1046 if (ci->i_shared_gen == di->lease_shared_gen ||
1047 snapid <= CEPH_MAXSNAP) {
1048 dout(" clearing %p complete (d_release)\n",
1049 parent_inode);
1050 ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
1051 ci->i_release_count++;
1052 }
1053 spin_unlock(&parent_inode->i_lock);
1054 }
1055 if (di) { 1035 if (di) {
1056 ceph_dentry_lru_del(dentry); 1036 ceph_dentry_lru_del(dentry);
1057 if (di->lease_session) 1037 if (di->lease_session)
@@ -1059,8 +1039,6 @@ static void ceph_dentry_release(struct dentry *dentry)
1059 kmem_cache_free(ceph_dentry_cachep, di); 1039 kmem_cache_free(ceph_dentry_cachep, di);
1060 dentry->d_fsdata = NULL; 1040 dentry->d_fsdata = NULL;
1061 } 1041 }
1062 if (parent_inode)
1063 iput(parent_inode);
1064} 1042}
1065 1043
1066static int ceph_snapdir_d_revalidate(struct dentry *dentry, 1044static int ceph_snapdir_d_revalidate(struct dentry *dentry,
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 5625463aa479..193bfa5e9cbd 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -707,7 +707,7 @@ static int fill_inode(struct inode *inode,
707 (issued & CEPH_CAP_FILE_EXCL) == 0 && 707 (issued & CEPH_CAP_FILE_EXCL) == 0 &&
708 (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) { 708 (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
709 dout(" marking %p complete (empty)\n", inode); 709 dout(" marking %p complete (empty)\n", inode);
710 ci->i_ceph_flags |= CEPH_I_COMPLETE; 710 /* ci->i_ceph_flags |= CEPH_I_COMPLETE; */
711 ci->i_max_offset = 2; 711 ci->i_max_offset = 2;
712 } 712 }
713 break; 713 break;
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 88fcaa21b801..20b907d76ae2 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -207,7 +207,6 @@ struct ceph_dentry_info {
207 struct dentry *dentry; 207 struct dentry *dentry;
208 u64 time; 208 u64 time;
209 u64 offset; 209 u64 offset;
210 struct inode *parent_inode;
211}; 210};
212 211
213struct ceph_inode_xattrs_info { 212struct ceph_inode_xattrs_info {
diff --git a/fs/compat.c b/fs/compat.c
index f6fd0a00e6cc..691c3fd8ce1d 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1228,7 +1228,9 @@ compat_sys_preadv(unsigned long fd, const struct compat_iovec __user *vec,
1228 file = fget_light(fd, &fput_needed); 1228 file = fget_light(fd, &fput_needed);
1229 if (!file) 1229 if (!file)
1230 return -EBADF; 1230 return -EBADF;
1231 ret = compat_readv(file, vec, vlen, &pos); 1231 ret = -ESPIPE;
1232 if (file->f_mode & FMODE_PREAD)
1233 ret = compat_readv(file, vec, vlen, &pos);
1232 fput_light(file, fput_needed); 1234 fput_light(file, fput_needed);
1233 return ret; 1235 return ret;
1234} 1236}
@@ -1285,7 +1287,9 @@ compat_sys_pwritev(unsigned long fd, const struct compat_iovec __user *vec,
1285 file = fget_light(fd, &fput_needed); 1287 file = fget_light(fd, &fput_needed);
1286 if (!file) 1288 if (!file)
1287 return -EBADF; 1289 return -EBADF;
1288 ret = compat_writev(file, vec, vlen, &pos); 1290 ret = -ESPIPE;
1291 if (file->f_mode & FMODE_PWRITE)
1292 ret = compat_writev(file, vec, vlen, &pos);
1289 fput_light(file, fput_needed); 1293 fput_light(file, fput_needed);
1290 return ret; 1294 return ret;
1291} 1295}
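
The compat.c hunks make the 32-bit preadv/pwritev paths honour FMODE_PREAD and FMODE_PWRITE the way the native syscalls already do, so positional I/O on an unseekable file fails with ESPIPE instead of being performed anyway. The user-visible behaviour can be checked from userspace:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fds[2];
	char buf[16];

	if (pipe(fds) != 0)
		return 1;

	/* a pipe cannot seek, so a positional read must fail with ESPIPE */
	if (pread(fds[0], buf, sizeof(buf), 0) < 0)
		printf("pread on pipe: %s\n", strerror(errno));  /* "Illegal seek" */

	close(fds[0]);
	close(fds[1]);
	return 0;
}
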
diff --git a/fs/dcache.c b/fs/dcache.c
index 2a6bd9a4ae97..611ffe928c03 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1523,6 +1523,28 @@ struct dentry * d_alloc_root(struct inode * root_inode)
1523} 1523}
1524EXPORT_SYMBOL(d_alloc_root); 1524EXPORT_SYMBOL(d_alloc_root);
1525 1525
1526static struct dentry * __d_find_any_alias(struct inode *inode)
1527{
1528 struct dentry *alias;
1529
1530 if (list_empty(&inode->i_dentry))
1531 return NULL;
1532 alias = list_first_entry(&inode->i_dentry, struct dentry, d_alias);
1533 __dget(alias);
1534 return alias;
1535}
1536
1537static struct dentry * d_find_any_alias(struct inode *inode)
1538{
1539 struct dentry *de;
1540
1541 spin_lock(&inode->i_lock);
1542 de = __d_find_any_alias(inode);
1543 spin_unlock(&inode->i_lock);
1544 return de;
1545}
1546
1547
1526/** 1548/**
1527 * d_obtain_alias - find or allocate a dentry for a given inode 1549 * d_obtain_alias - find or allocate a dentry for a given inode
1528 * @inode: inode to allocate the dentry for 1550 * @inode: inode to allocate the dentry for
@@ -1552,7 +1574,7 @@ struct dentry *d_obtain_alias(struct inode *inode)
1552 if (IS_ERR(inode)) 1574 if (IS_ERR(inode))
1553 return ERR_CAST(inode); 1575 return ERR_CAST(inode);
1554 1576
1555 res = d_find_alias(inode); 1577 res = d_find_any_alias(inode);
1556 if (res) 1578 if (res)
1557 goto out_iput; 1579 goto out_iput;
1558 1580
@@ -1565,7 +1587,7 @@ struct dentry *d_obtain_alias(struct inode *inode)
1565 1587
1566 1588
1567 spin_lock(&inode->i_lock); 1589 spin_lock(&inode->i_lock);
1568 res = __d_find_alias(inode, 0); 1590 res = __d_find_any_alias(inode);
1569 if (res) { 1591 if (res) {
1570 spin_unlock(&inode->i_lock); 1592 spin_unlock(&inode->i_lock);
1571 dput(tmp); 1593 dput(tmp);
diff --git a/fs/exofs/namei.c b/fs/exofs/namei.c
index 264e95d02830..4d70db110cfc 100644
--- a/fs/exofs/namei.c
+++ b/fs/exofs/namei.c
@@ -272,7 +272,6 @@ static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry,
272 new_de = exofs_find_entry(new_dir, new_dentry, &new_page); 272 new_de = exofs_find_entry(new_dir, new_dentry, &new_page);
273 if (!new_de) 273 if (!new_de)
274 goto out_dir; 274 goto out_dir;
275 inode_inc_link_count(old_inode);
276 err = exofs_set_link(new_dir, new_de, new_page, old_inode); 275 err = exofs_set_link(new_dir, new_de, new_page, old_inode);
277 new_inode->i_ctime = CURRENT_TIME; 276 new_inode->i_ctime = CURRENT_TIME;
278 if (dir_de) 277 if (dir_de)
@@ -286,12 +285,9 @@ static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry,
286 if (new_dir->i_nlink >= EXOFS_LINK_MAX) 285 if (new_dir->i_nlink >= EXOFS_LINK_MAX)
287 goto out_dir; 286 goto out_dir;
288 } 287 }
289 inode_inc_link_count(old_inode);
290 err = exofs_add_link(new_dentry, old_inode); 288 err = exofs_add_link(new_dentry, old_inode);
291 if (err) { 289 if (err)
292 inode_dec_link_count(old_inode);
293 goto out_dir; 290 goto out_dir;
294 }
295 if (dir_de) 291 if (dir_de)
296 inode_inc_link_count(new_dir); 292 inode_inc_link_count(new_dir);
297 } 293 }
@@ -299,7 +295,7 @@ static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry,
299 old_inode->i_ctime = CURRENT_TIME; 295 old_inode->i_ctime = CURRENT_TIME;
300 296
301 exofs_delete_entry(old_de, old_page); 297 exofs_delete_entry(old_de, old_page);
302 inode_dec_link_count(old_inode); 298 mark_inode_dirty(old_inode);
303 299
304 if (dir_de) { 300 if (dir_de) {
305 err = exofs_set_link(old_inode, dir_de, dir_page, new_dir); 301 err = exofs_set_link(old_inode, dir_de, dir_page, new_dir);
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 2e1d8341d827..adb91855ccd0 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -344,7 +344,6 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
344 new_de = ext2_find_entry (new_dir, &new_dentry->d_name, &new_page); 344 new_de = ext2_find_entry (new_dir, &new_dentry->d_name, &new_page);
345 if (!new_de) 345 if (!new_de)
346 goto out_dir; 346 goto out_dir;
347 inode_inc_link_count(old_inode);
348 ext2_set_link(new_dir, new_de, new_page, old_inode, 1); 347 ext2_set_link(new_dir, new_de, new_page, old_inode, 1);
349 new_inode->i_ctime = CURRENT_TIME_SEC; 348 new_inode->i_ctime = CURRENT_TIME_SEC;
350 if (dir_de) 349 if (dir_de)
@@ -356,12 +355,9 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
356 if (new_dir->i_nlink >= EXT2_LINK_MAX) 355 if (new_dir->i_nlink >= EXT2_LINK_MAX)
357 goto out_dir; 356 goto out_dir;
358 } 357 }
359 inode_inc_link_count(old_inode);
360 err = ext2_add_link(new_dentry, old_inode); 358 err = ext2_add_link(new_dentry, old_inode);
361 if (err) { 359 if (err)
362 inode_dec_link_count(old_inode);
363 goto out_dir; 360 goto out_dir;
364 }
365 if (dir_de) 361 if (dir_de)
366 inode_inc_link_count(new_dir); 362 inode_inc_link_count(new_dir);
367 } 363 }
@@ -369,12 +365,11 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
369 /* 365 /*
370 * Like most other Unix systems, set the ctime for inodes on a 366 * Like most other Unix systems, set the ctime for inodes on a
371 * rename. 367 * rename.
372 * inode_dec_link_count() will mark the inode dirty.
373 */ 368 */
374 old_inode->i_ctime = CURRENT_TIME_SEC; 369 old_inode->i_ctime = CURRENT_TIME_SEC;
370 mark_inode_dirty(old_inode);
375 371
376 ext2_delete_entry (old_de, old_page); 372 ext2_delete_entry (old_de, old_page);
377 inode_dec_link_count(old_inode);
378 373
379 if (dir_de) { 374 if (dir_de) {
380 if (old_dir != new_dir) 375 if (old_dir != new_dir)
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index f88f752babd9..adae3fb7451a 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -43,7 +43,7 @@ static int vfat_revalidate_shortname(struct dentry *dentry)
43 43
44static int vfat_revalidate(struct dentry *dentry, struct nameidata *nd) 44static int vfat_revalidate(struct dentry *dentry, struct nameidata *nd)
45{ 45{
46 if (nd->flags & LOOKUP_RCU) 46 if (nd && nd->flags & LOOKUP_RCU)
47 return -ECHILD; 47 return -ECHILD;
48 48
49 /* This is not negative dentry. Always valid. */ 49 /* This is not negative dentry. Always valid. */
@@ -54,7 +54,7 @@ static int vfat_revalidate(struct dentry *dentry, struct nameidata *nd)
54 54
55static int vfat_revalidate_ci(struct dentry *dentry, struct nameidata *nd) 55static int vfat_revalidate_ci(struct dentry *dentry, struct nameidata *nd)
56{ 56{
57 if (nd->flags & LOOKUP_RCU) 57 if (nd && nd->flags & LOOKUP_RCU)
58 return -ECHILD; 58 return -ECHILD;
59 59
60 /* 60 /*
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 83543b5ff941..8bd0ef9286c3 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -158,7 +158,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
158{ 158{
159 struct inode *inode; 159 struct inode *inode;
160 160
161 if (nd->flags & LOOKUP_RCU) 161 if (nd && nd->flags & LOOKUP_RCU)
162 return -ECHILD; 162 return -ECHILD;
163 163
164 inode = entry->d_inode; 164 inode = entry->d_inode;
diff --git a/fs/gfs2/dentry.c b/fs/gfs2/dentry.c
index 4a456338b873..0da8da2c991d 100644
--- a/fs/gfs2/dentry.c
+++ b/fs/gfs2/dentry.c
@@ -44,7 +44,7 @@ static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd)
44 int error; 44 int error;
45 int had_lock = 0; 45 int had_lock = 0;
46 46
47 if (nd->flags & LOOKUP_RCU) 47 if (nd && nd->flags & LOOKUP_RCU)
48 return -ECHILD; 48 return -ECHILD;
49 49
50 parent = dget_parent(dentry); 50 parent = dget_parent(dentry);
diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c
index afa66aaa2237..b4d70b13be92 100644
--- a/fs/hfs/dir.c
+++ b/fs/hfs/dir.c
@@ -238,46 +238,22 @@ static int hfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
238} 238}
239 239
240/* 240/*
241 * hfs_unlink() 241 * hfs_remove()
242 * 242 *
243 * This is the unlink() entry in the inode_operations structure for 243 * This serves as both unlink() and rmdir() in the inode_operations
244 * regular HFS directories. The purpose is to delete an existing 244 * structure for regular HFS directories. The purpose is to delete
245 * file, given the inode for the parent directory and the name 245 * an existing child, given the inode for the parent directory and
246 * (and its length) of the existing file. 246 * the name (and its length) of the existing directory.
247 */
248static int hfs_unlink(struct inode *dir, struct dentry *dentry)
249{
250 struct inode *inode;
251 int res;
252
253 inode = dentry->d_inode;
254 res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name);
255 if (res)
256 return res;
257
258 drop_nlink(inode);
259 hfs_delete_inode(inode);
260 inode->i_ctime = CURRENT_TIME_SEC;
261 mark_inode_dirty(inode);
262
263 return res;
264}
265
266/*
267 * hfs_rmdir()
268 * 247 *
269 * This is the rmdir() entry in the inode_operations structure for 248 * HFS does not have hardlinks, so both rmdir and unlink set the
270 * regular HFS directories. The purpose is to delete an existing 249 * link count to 0. The only difference is the emptiness check.
271 * directory, given the inode for the parent directory and the name
272 * (and its length) of the existing directory.
273 */ 250 */
274static int hfs_rmdir(struct inode *dir, struct dentry *dentry) 251static int hfs_remove(struct inode *dir, struct dentry *dentry)
275{ 252{
276 struct inode *inode; 253 struct inode *inode = dentry->d_inode;
277 int res; 254 int res;
278 255
279 inode = dentry->d_inode; 256 if (S_ISDIR(inode->i_mode) && inode->i_size != 2)
280 if (inode->i_size != 2)
281 return -ENOTEMPTY; 257 return -ENOTEMPTY;
282 res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name); 258 res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name);
283 if (res) 259 if (res)
@@ -307,7 +283,7 @@ static int hfs_rename(struct inode *old_dir, struct dentry *old_dentry,
307 283
308 /* Unlink destination if it already exists */ 284 /* Unlink destination if it already exists */
309 if (new_dentry->d_inode) { 285 if (new_dentry->d_inode) {
310 res = hfs_unlink(new_dir, new_dentry); 286 res = hfs_remove(new_dir, new_dentry);
311 if (res) 287 if (res)
312 return res; 288 return res;
313 } 289 }
@@ -332,9 +308,9 @@ const struct file_operations hfs_dir_operations = {
332const struct inode_operations hfs_dir_inode_operations = { 308const struct inode_operations hfs_dir_inode_operations = {
333 .create = hfs_create, 309 .create = hfs_create,
334 .lookup = hfs_lookup, 310 .lookup = hfs_lookup,
335 .unlink = hfs_unlink, 311 .unlink = hfs_remove,
336 .mkdir = hfs_mkdir, 312 .mkdir = hfs_mkdir,
337 .rmdir = hfs_rmdir, 313 .rmdir = hfs_remove,
338 .rename = hfs_rename, 314 .rename = hfs_rename,
339 .setattr = hfs_inode_setattr, 315 .setattr = hfs_inode_setattr,
340}; 316};
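
The HFS change folds hfs_unlink() and hfs_rmdir() into a single hfs_remove(), the only remaining difference being the emptiness check on directories, since HFS has no hard links. Loosely, this is the same consolidation remove(3) offers in userspace; the analogy below is illustrative only:

#include <stdio.h>

/* remove(3) dispatches to unlink() for files and rmdir() for
 * directories, much as hfs_remove now serves both operations. */
int main(int argc, char **argv)
{
	if (argc != 2)
		return 1;
	if (remove(argv[1]) != 0) {
		perror(argv[1]);        /* e.g. "Directory not empty" */
		return 1;
	}
	return 0;
}
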
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 81ead850ddb6..5a2b269428a6 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -1600,7 +1600,7 @@ out:
1600 1600
1601static int jfs_ci_revalidate(struct dentry *dentry, struct nameidata *nd) 1601static int jfs_ci_revalidate(struct dentry *dentry, struct nameidata *nd)
1602{ 1602{
1603 if (nd->flags & LOOKUP_RCU) 1603 if (nd && nd->flags & LOOKUP_RCU)
1604 return -ECHILD; 1604 return -ECHILD;
1605 /* 1605 /*
1606 * This is not negative dentry. Always valid. 1606 * This is not negative dentry. Always valid.
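
The recurring one-line change in the fat, fuse, gfs2, jfs and ceph ->d_revalidate implementations guards against a NULL nameidata before its flags are read; because && short-circuits, the dereference on the right is never reached when nd is NULL. A minimal sketch (the flag value and struct layout here are illustrative, not the kernel's):

#include <stdbool.h>
#include <stddef.h>

struct nameidata { unsigned int flags; };
#define LOOKUP_RCU 0x40                 /* illustrative value */

static bool in_rcu_walk(const struct nameidata *nd)
{
	/* nd may legitimately be NULL; test it before touching nd->flags */
	return nd && (nd->flags & LOOKUP_RCU);
}
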
diff --git a/fs/minix/namei.c b/fs/minix/namei.c
index ce7337ddfdbf..6e6777f1b4b2 100644
--- a/fs/minix/namei.c
+++ b/fs/minix/namei.c
@@ -213,7 +213,6 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry,
213 new_de = minix_find_entry(new_dentry, &new_page); 213 new_de = minix_find_entry(new_dentry, &new_page);
214 if (!new_de) 214 if (!new_de)
215 goto out_dir; 215 goto out_dir;
216 inode_inc_link_count(old_inode);
217 minix_set_link(new_de, new_page, old_inode); 216 minix_set_link(new_de, new_page, old_inode);
218 new_inode->i_ctime = CURRENT_TIME_SEC; 217 new_inode->i_ctime = CURRENT_TIME_SEC;
219 if (dir_de) 218 if (dir_de)
@@ -225,18 +224,15 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry,
225 if (new_dir->i_nlink >= info->s_link_max) 224 if (new_dir->i_nlink >= info->s_link_max)
226 goto out_dir; 225 goto out_dir;
227 } 226 }
228 inode_inc_link_count(old_inode);
229 err = minix_add_link(new_dentry, old_inode); 227 err = minix_add_link(new_dentry, old_inode);
230 if (err) { 228 if (err)
231 inode_dec_link_count(old_inode);
232 goto out_dir; 229 goto out_dir;
233 }
234 if (dir_de) 230 if (dir_de)
235 inode_inc_link_count(new_dir); 231 inode_inc_link_count(new_dir);
236 } 232 }
237 233
238 minix_delete_entry(old_de, old_page); 234 minix_delete_entry(old_de, old_page);
239 inode_dec_link_count(old_inode); 235 mark_inode_dirty(old_inode);
240 236
241 if (dir_de) { 237 if (dir_de) {
242 minix_set_link(dir_de, dir_page, new_dir); 238 minix_set_link(dir_de, dir_page, new_dir);
diff --git a/fs/namei.c b/fs/namei.c
index 0087cf9c2c6b..a4689eb2df28 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1546,6 +1546,7 @@ static int path_walk(const char *name, struct nameidata *nd)
1546 /* nd->path had been dropped */ 1546 /* nd->path had been dropped */
1547 current->total_link_count = 0; 1547 current->total_link_count = 0;
1548 nd->path = save; 1548 nd->path = save;
1549 nd->inode = save.dentry->d_inode;
1549 path_get(&nd->path); 1550 path_get(&nd->path);
1550 nd->flags |= LOOKUP_REVAL; 1551 nd->flags |= LOOKUP_REVAL;
1551 result = link_path_walk(name, nd); 1552 result = link_path_walk(name, nd);
@@ -2455,22 +2456,29 @@ struct file *do_filp_open(int dfd, const char *pathname,
2455 /* !O_CREAT, simple open */ 2456 /* !O_CREAT, simple open */
2456 error = do_path_lookup(dfd, pathname, flags, &nd); 2457 error = do_path_lookup(dfd, pathname, flags, &nd);
2457 if (unlikely(error)) 2458 if (unlikely(error))
2458 goto out_filp; 2459 goto out_filp2;
2459 error = -ELOOP; 2460 error = -ELOOP;
2460 if (!(nd.flags & LOOKUP_FOLLOW)) { 2461 if (!(nd.flags & LOOKUP_FOLLOW)) {
2461 if (nd.inode->i_op->follow_link) 2462 if (nd.inode->i_op->follow_link)
2462 goto out_path; 2463 goto out_path2;
2463 } 2464 }
2464 error = -ENOTDIR; 2465 error = -ENOTDIR;
2465 if (nd.flags & LOOKUP_DIRECTORY) { 2466 if (nd.flags & LOOKUP_DIRECTORY) {
2466 if (!nd.inode->i_op->lookup) 2467 if (!nd.inode->i_op->lookup)
2467 goto out_path; 2468 goto out_path2;
2468 } 2469 }
2469 audit_inode(pathname, nd.path.dentry); 2470 audit_inode(pathname, nd.path.dentry);
2470 filp = finish_open(&nd, open_flag, acc_mode); 2471 filp = finish_open(&nd, open_flag, acc_mode);
2472out2:
2471 release_open_intent(&nd); 2473 release_open_intent(&nd);
2472 return filp; 2474 return filp;
2473 2475
2476out_path2:
2477 path_put(&nd.path);
2478out_filp2:
2479 filp = ERR_PTR(error);
2480 goto out2;
2481
2474creat: 2482creat:
2475 /* OK, have to create the file. Find the parent. */ 2483 /* OK, have to create the file. Find the parent. */
2476 error = path_init_rcu(dfd, pathname, 2484 error = path_init_rcu(dfd, pathname,
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 1cc600e77bb4..2f8e61816d75 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -37,6 +37,7 @@
37#include <linux/inet.h> 37#include <linux/inet.h>
38#include <linux/nfs_xdr.h> 38#include <linux/nfs_xdr.h>
39#include <linux/slab.h> 39#include <linux/slab.h>
40#include <linux/compat.h>
40 41
41#include <asm/system.h> 42#include <asm/system.h>
42#include <asm/uaccess.h> 43#include <asm/uaccess.h>
@@ -89,7 +90,11 @@ int nfs_wait_bit_killable(void *word)
89 */ 90 */
90u64 nfs_compat_user_ino64(u64 fileid) 91u64 nfs_compat_user_ino64(u64 fileid)
91{ 92{
92 int ino; 93#ifdef CONFIG_COMPAT
94 compat_ulong_t ino;
95#else
96 unsigned long ino;
97#endif
93 98
94 if (enable_ino64) 99 if (enable_ino64)
95 return fileid; 100 return fileid;
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 7a7474073148..1be36cf65bfc 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -298,6 +298,11 @@ struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp);
298#if defined(CONFIG_NFS_V4_1) 298#if defined(CONFIG_NFS_V4_1)
299struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp); 299struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp);
300struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp); 300struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp);
301extern void nfs4_schedule_session_recovery(struct nfs4_session *);
302#else
303static inline void nfs4_schedule_session_recovery(struct nfs4_session *session)
304{
305}
301#endif /* CONFIG_NFS_V4_1 */ 306#endif /* CONFIG_NFS_V4_1 */
302 307
303extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *); 308extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *);
@@ -307,10 +312,9 @@ extern void nfs4_put_open_state(struct nfs4_state *);
307extern void nfs4_close_state(struct path *, struct nfs4_state *, fmode_t); 312extern void nfs4_close_state(struct path *, struct nfs4_state *, fmode_t);
308extern void nfs4_close_sync(struct path *, struct nfs4_state *, fmode_t); 313extern void nfs4_close_sync(struct path *, struct nfs4_state *, fmode_t);
309extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t); 314extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t);
310extern void nfs4_schedule_state_recovery(struct nfs_client *); 315extern void nfs4_schedule_lease_recovery(struct nfs_client *);
311extern void nfs4_schedule_state_manager(struct nfs_client *); 316extern void nfs4_schedule_state_manager(struct nfs_client *);
312extern int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state); 317extern void nfs4_schedule_stateid_recovery(const struct nfs_server *, struct nfs4_state *);
313extern int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state);
314extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags); 318extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags);
315extern void nfs41_handle_recall_slot(struct nfs_client *clp); 319extern void nfs41_handle_recall_slot(struct nfs_client *clp);
316extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp); 320extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
index f5c9b125e8cc..b73c34375f60 100644
--- a/fs/nfs/nfs4filelayoutdev.c
+++ b/fs/nfs/nfs4filelayoutdev.c
@@ -219,6 +219,10 @@ decode_and_add_ds(__be32 **pp, struct inode *inode)
219 goto out_err; 219 goto out_err;
220 } 220 }
221 buf = kmalloc(rlen + 1, GFP_KERNEL); 221 buf = kmalloc(rlen + 1, GFP_KERNEL);
222 if (!buf) {
223 dprintk("%s: Not enough memory\n", __func__);
224 goto out_err;
225 }
222 buf[rlen] = '\0'; 226 buf[rlen] = '\0';
223 memcpy(buf, r_addr, rlen); 227 memcpy(buf, r_addr, rlen);
224 228
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 78936a8f40ab..0a07e353a961 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -256,12 +256,13 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
256 case -NFS4ERR_OPENMODE: 256 case -NFS4ERR_OPENMODE:
257 if (state == NULL) 257 if (state == NULL)
258 break; 258 break;
259 nfs4_state_mark_reclaim_nograce(clp, state); 259 nfs4_schedule_stateid_recovery(server, state);
260 goto do_state_recovery; 260 goto wait_on_recovery;
261 case -NFS4ERR_STALE_STATEID: 261 case -NFS4ERR_STALE_STATEID:
262 case -NFS4ERR_STALE_CLIENTID: 262 case -NFS4ERR_STALE_CLIENTID:
263 case -NFS4ERR_EXPIRED: 263 case -NFS4ERR_EXPIRED:
264 goto do_state_recovery; 264 nfs4_schedule_lease_recovery(clp);
265 goto wait_on_recovery;
265#if defined(CONFIG_NFS_V4_1) 266#if defined(CONFIG_NFS_V4_1)
266 case -NFS4ERR_BADSESSION: 267 case -NFS4ERR_BADSESSION:
267 case -NFS4ERR_BADSLOT: 268 case -NFS4ERR_BADSLOT:
@@ -272,7 +273,7 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
272 case -NFS4ERR_SEQ_MISORDERED: 273 case -NFS4ERR_SEQ_MISORDERED:
273 dprintk("%s ERROR: %d Reset session\n", __func__, 274 dprintk("%s ERROR: %d Reset session\n", __func__,
274 errorcode); 275 errorcode);
275 nfs4_schedule_state_recovery(clp); 276 nfs4_schedule_session_recovery(clp->cl_session);
276 exception->retry = 1; 277 exception->retry = 1;
277 break; 278 break;
278#endif /* defined(CONFIG_NFS_V4_1) */ 279#endif /* defined(CONFIG_NFS_V4_1) */
@@ -295,8 +296,7 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
295 } 296 }
296 /* We failed to handle the error */ 297 /* We failed to handle the error */
297 return nfs4_map_errors(ret); 298 return nfs4_map_errors(ret);
298do_state_recovery: 299wait_on_recovery:
299 nfs4_schedule_state_recovery(clp);
300 ret = nfs4_wait_clnt_recover(clp); 300 ret = nfs4_wait_clnt_recover(clp);
301 if (ret == 0) 301 if (ret == 0)
302 exception->retry = 1; 302 exception->retry = 1;
@@ -435,8 +435,8 @@ static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *
435 clp = res->sr_session->clp; 435 clp = res->sr_session->clp;
436 do_renew_lease(clp, timestamp); 436 do_renew_lease(clp, timestamp);
437 /* Check sequence flags */ 437 /* Check sequence flags */
438 if (atomic_read(&clp->cl_count) > 1) 438 if (res->sr_status_flags != 0)
439 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags); 439 nfs4_schedule_lease_recovery(clp);
440 break; 440 break;
441 case -NFS4ERR_DELAY: 441 case -NFS4ERR_DELAY:
442 /* The server detected a resend of the RPC call and 442 /* The server detected a resend of the RPC call and
@@ -1255,14 +1255,13 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state
1255 case -NFS4ERR_BAD_HIGH_SLOT: 1255 case -NFS4ERR_BAD_HIGH_SLOT:
1256 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 1256 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1257 case -NFS4ERR_DEADSESSION: 1257 case -NFS4ERR_DEADSESSION:
1258 nfs4_schedule_state_recovery( 1258 nfs4_schedule_session_recovery(server->nfs_client->cl_session);
1259 server->nfs_client);
1260 goto out; 1259 goto out;
1261 case -NFS4ERR_STALE_CLIENTID: 1260 case -NFS4ERR_STALE_CLIENTID:
1262 case -NFS4ERR_STALE_STATEID: 1261 case -NFS4ERR_STALE_STATEID:
1263 case -NFS4ERR_EXPIRED: 1262 case -NFS4ERR_EXPIRED:
1264 /* Don't recall a delegation if it was lost */ 1263 /* Don't recall a delegation if it was lost */
1265 nfs4_schedule_state_recovery(server->nfs_client); 1264 nfs4_schedule_lease_recovery(server->nfs_client);
1266 goto out; 1265 goto out;
1267 case -ERESTARTSYS: 1266 case -ERESTARTSYS:
1268 /* 1267 /*
@@ -1271,7 +1270,7 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state
1271 */ 1270 */
1272 case -NFS4ERR_ADMIN_REVOKED: 1271 case -NFS4ERR_ADMIN_REVOKED:
1273 case -NFS4ERR_BAD_STATEID: 1272 case -NFS4ERR_BAD_STATEID:
1274 nfs4_state_mark_reclaim_nograce(server->nfs_client, state); 1273 nfs4_schedule_stateid_recovery(server, state);
1275 case -EKEYEXPIRED: 1274 case -EKEYEXPIRED:
1276 /* 1275 /*
1277 * User RPCSEC_GSS context has expired. 1276 * User RPCSEC_GSS context has expired.
@@ -1587,7 +1586,7 @@ static int nfs4_recover_expired_lease(struct nfs_server *server)
1587 if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) && 1586 if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
1588 !test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state)) 1587 !test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state))
1589 break; 1588 break;
1590 nfs4_schedule_state_recovery(clp); 1589 nfs4_schedule_state_manager(clp);
1591 ret = -EIO; 1590 ret = -EIO;
1592 } 1591 }
1593 return ret; 1592 return ret;
@@ -3178,7 +3177,7 @@ static void nfs4_renew_done(struct rpc_task *task, void *calldata)
3178 if (task->tk_status < 0) { 3177 if (task->tk_status < 0) {
3179 /* Unless we're shutting down, schedule state recovery! */ 3178 /* Unless we're shutting down, schedule state recovery! */
3180 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) != 0) 3179 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) != 0)
3181 nfs4_schedule_state_recovery(clp); 3180 nfs4_schedule_lease_recovery(clp);
3182 return; 3181 return;
3183 } 3182 }
3184 do_renew_lease(clp, timestamp); 3183 do_renew_lease(clp, timestamp);
@@ -3252,6 +3251,35 @@ static void buf_to_pages(const void *buf, size_t buflen,
3252 } 3251 }
3253} 3252}
3254 3253
3254static int buf_to_pages_noslab(const void *buf, size_t buflen,
3255 struct page **pages, unsigned int *pgbase)
3256{
3257 struct page *newpage, **spages;
3258 int rc = 0;
3259 size_t len;
3260 spages = pages;
3261
3262 do {
3263 len = min_t(size_t, PAGE_CACHE_SIZE, buflen);
3264 newpage = alloc_page(GFP_KERNEL);
3265
3266 if (newpage == NULL)
3267 goto unwind;
3268 memcpy(page_address(newpage), buf, len);
3269 buf += len;
3270 buflen -= len;
3271 *pages++ = newpage;
3272 rc++;
3273 } while (buflen != 0);
3274
3275 return rc;
3276
3277unwind:
3278 for(; rc > 0; rc--)
3279 __free_page(spages[rc-1]);
3280 return -ENOMEM;
3281}
3282
3255struct nfs4_cached_acl { 3283struct nfs4_cached_acl {
3256 int cached; 3284 int cached;
3257 size_t len; 3285 size_t len;
@@ -3420,13 +3448,23 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl
3420 .rpc_argp = &arg, 3448 .rpc_argp = &arg,
3421 .rpc_resp = &res, 3449 .rpc_resp = &res,
3422 }; 3450 };
3423 int ret; 3451 int ret, i;
3424 3452
3425 if (!nfs4_server_supports_acls(server)) 3453 if (!nfs4_server_supports_acls(server))
3426 return -EOPNOTSUPP; 3454 return -EOPNOTSUPP;
3455 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
3456 if (i < 0)
3457 return i;
3427 nfs_inode_return_delegation(inode); 3458 nfs_inode_return_delegation(inode);
3428 buf_to_pages(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
3429 ret = nfs4_call_sync(server, &msg, &arg, &res, 1); 3459 ret = nfs4_call_sync(server, &msg, &arg, &res, 1);
3460
3461 /*
3462 * Free each page after tx, so the only ref left is
3463 * held by the network stack
3464 */
3465 for (; i > 0; i--)
3466 put_page(pages[i-1]);
3467
3430 /* 3468 /*
3431 * Acl update can result in inode attribute update. 3469 * Acl update can result in inode attribute update.
3432 * so mark the attribute cache invalid. 3470 * so mark the attribute cache invalid.
@@ -3464,12 +3502,13 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
3464 case -NFS4ERR_OPENMODE: 3502 case -NFS4ERR_OPENMODE:
3465 if (state == NULL) 3503 if (state == NULL)
3466 break; 3504 break;
3467 nfs4_state_mark_reclaim_nograce(clp, state); 3505 nfs4_schedule_stateid_recovery(server, state);
3468 goto do_state_recovery; 3506 goto wait_on_recovery;
3469 case -NFS4ERR_STALE_STATEID: 3507 case -NFS4ERR_STALE_STATEID:
3470 case -NFS4ERR_STALE_CLIENTID: 3508 case -NFS4ERR_STALE_CLIENTID:
3471 case -NFS4ERR_EXPIRED: 3509 case -NFS4ERR_EXPIRED:
3472 goto do_state_recovery; 3510 nfs4_schedule_lease_recovery(clp);
3511 goto wait_on_recovery;
3473#if defined(CONFIG_NFS_V4_1) 3512#if defined(CONFIG_NFS_V4_1)
3474 case -NFS4ERR_BADSESSION: 3513 case -NFS4ERR_BADSESSION:
3475 case -NFS4ERR_BADSLOT: 3514 case -NFS4ERR_BADSLOT:
@@ -3480,7 +3519,7 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
3480 case -NFS4ERR_SEQ_MISORDERED: 3519 case -NFS4ERR_SEQ_MISORDERED:
3481 dprintk("%s ERROR %d, Reset session\n", __func__, 3520 dprintk("%s ERROR %d, Reset session\n", __func__,
3482 task->tk_status); 3521 task->tk_status);
3483 nfs4_schedule_state_recovery(clp); 3522 nfs4_schedule_session_recovery(clp->cl_session);
3484 task->tk_status = 0; 3523 task->tk_status = 0;
3485 return -EAGAIN; 3524 return -EAGAIN;
3486#endif /* CONFIG_NFS_V4_1 */ 3525#endif /* CONFIG_NFS_V4_1 */
@@ -3497,9 +3536,8 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
3497 } 3536 }
3498 task->tk_status = nfs4_map_errors(task->tk_status); 3537 task->tk_status = nfs4_map_errors(task->tk_status);
3499 return 0; 3538 return 0;
3500do_state_recovery: 3539wait_on_recovery:
3501 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL); 3540 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
3502 nfs4_schedule_state_recovery(clp);
3503 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0) 3541 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
3504 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task); 3542 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
3505 task->tk_status = 0; 3543 task->tk_status = 0;
@@ -4110,7 +4148,7 @@ static void nfs4_lock_release(void *calldata)
4110 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp, 4148 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
4111 data->arg.lock_seqid); 4149 data->arg.lock_seqid);
4112 if (!IS_ERR(task)) 4150 if (!IS_ERR(task))
4113 rpc_put_task(task); 4151 rpc_put_task_async(task);
4114 dprintk("%s: cancelling lock!\n", __func__); 4152 dprintk("%s: cancelling lock!\n", __func__);
4115 } else 4153 } else
4116 nfs_free_seqid(data->arg.lock_seqid); 4154 nfs_free_seqid(data->arg.lock_seqid);
@@ -4134,23 +4172,18 @@ static const struct rpc_call_ops nfs4_recover_lock_ops = {
4134 4172
4135static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) 4173static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
4136{ 4174{
4137 struct nfs_client *clp = server->nfs_client;
4138 struct nfs4_state *state = lsp->ls_state;
4139
4140 switch (error) { 4175 switch (error) {
4141 case -NFS4ERR_ADMIN_REVOKED: 4176 case -NFS4ERR_ADMIN_REVOKED:
4142 case -NFS4ERR_BAD_STATEID: 4177 case -NFS4ERR_BAD_STATEID:
4143 case -NFS4ERR_EXPIRED: 4178 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
4144 if (new_lock_owner != 0 || 4179 if (new_lock_owner != 0 ||
4145 (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) 4180 (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
4146 nfs4_state_mark_reclaim_nograce(clp, state); 4181 nfs4_schedule_stateid_recovery(server, lsp->ls_state);
4147 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
4148 break; 4182 break;
4149 case -NFS4ERR_STALE_STATEID: 4183 case -NFS4ERR_STALE_STATEID:
4150 if (new_lock_owner != 0 ||
4151 (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
4152 nfs4_state_mark_reclaim_reboot(clp, state);
4153 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 4184 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
4185 case -NFS4ERR_EXPIRED:
4186 nfs4_schedule_lease_recovery(server->nfs_client);
4154 }; 4187 };
4155} 4188}
4156 4189
@@ -4366,12 +4399,14 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
4366 case -NFS4ERR_EXPIRED: 4399 case -NFS4ERR_EXPIRED:
4367 case -NFS4ERR_STALE_CLIENTID: 4400 case -NFS4ERR_STALE_CLIENTID:
4368 case -NFS4ERR_STALE_STATEID: 4401 case -NFS4ERR_STALE_STATEID:
4402 nfs4_schedule_lease_recovery(server->nfs_client);
4403 goto out;
4369 case -NFS4ERR_BADSESSION: 4404 case -NFS4ERR_BADSESSION:
4370 case -NFS4ERR_BADSLOT: 4405 case -NFS4ERR_BADSLOT:
4371 case -NFS4ERR_BAD_HIGH_SLOT: 4406 case -NFS4ERR_BAD_HIGH_SLOT:
4372 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 4407 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
4373 case -NFS4ERR_DEADSESSION: 4408 case -NFS4ERR_DEADSESSION:
4374 nfs4_schedule_state_recovery(server->nfs_client); 4409 nfs4_schedule_session_recovery(server->nfs_client->cl_session);
4375 goto out; 4410 goto out;
4376 case -ERESTARTSYS: 4411 case -ERESTARTSYS:
4377 /* 4412 /*
@@ -4381,7 +4416,7 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
4381 case -NFS4ERR_ADMIN_REVOKED: 4416 case -NFS4ERR_ADMIN_REVOKED:
4382 case -NFS4ERR_BAD_STATEID: 4417 case -NFS4ERR_BAD_STATEID:
4383 case -NFS4ERR_OPENMODE: 4418 case -NFS4ERR_OPENMODE:
4384 nfs4_state_mark_reclaim_nograce(server->nfs_client, state); 4419 nfs4_schedule_stateid_recovery(server, state);
4385 err = 0; 4420 err = 0;
4386 goto out; 4421 goto out;
4387 case -EKEYEXPIRED: 4422 case -EKEYEXPIRED:
@@ -4988,10 +5023,20 @@ int nfs4_proc_create_session(struct nfs_client *clp)
4988 int status; 5023 int status;
4989 unsigned *ptr; 5024 unsigned *ptr;
4990 struct nfs4_session *session = clp->cl_session; 5025 struct nfs4_session *session = clp->cl_session;
5026 long timeout = 0;
5027 int err;
4991 5028
4992 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session); 5029 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
4993 5030
4994 status = _nfs4_proc_create_session(clp); 5031 do {
5032 status = _nfs4_proc_create_session(clp);
5033 if (status == -NFS4ERR_DELAY) {
5034 err = nfs4_delay(clp->cl_rpcclient, &timeout);
5035 if (err)
5036 status = err;
5037 }
5038 } while (status == -NFS4ERR_DELAY);
5039
4995 if (status) 5040 if (status)
4996 goto out; 5041 goto out;
4997 5042
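nfs4_proc_create_session() now loops on NFS4ERR_DELAY, sleeping via nfs4_delay() between attempts, so a server that is temporarily unable to create the session no longer fails the operation outright. A rough userspace analogue of that retry-with-growing-backoff loop; EAGAIN stands in for NFS4ERR_DELAY, the stub and backoff values are invented for illustration.

#include <errno.h>
#include <stdio.h>
#include <time.h>

/* Stub standing in for the CREATE_SESSION call: busy for the first few tries. */
static int do_create_session(void)
{
	static int calls;

	return ++calls < 3 ? -EAGAIN : 0;
}

static int create_session_with_retry(void)
{
	long timeout_ms = 100;		/* initial backoff; doubled up to a cap */
	int status;

	do {
		status = do_create_session();
		if (status == -EAGAIN) {
			struct timespec ts = {
				.tv_sec  = timeout_ms / 1000,
				.tv_nsec = (timeout_ms % 1000) * 1000000L,
			};

			nanosleep(&ts, NULL);	/* wait before retrying */
			if (timeout_ms < 2000)
				timeout_ms *= 2;
		}
	} while (status == -EAGAIN);

	return status;
}

int main(void)
{
	printf("create_session -> %d\n", create_session_with_retry());
	return 0;
}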
@@ -5100,7 +5145,7 @@ static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client
5100 rpc_delay(task, NFS4_POLL_RETRY_MAX); 5145 rpc_delay(task, NFS4_POLL_RETRY_MAX);
5101 return -EAGAIN; 5146 return -EAGAIN;
5102 default: 5147 default:
5103 nfs4_schedule_state_recovery(clp); 5148 nfs4_schedule_lease_recovery(clp);
5104 } 5149 }
5105 return 0; 5150 return 0;
5106} 5151}
@@ -5187,7 +5232,7 @@ static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cr
5187 if (IS_ERR(task)) 5232 if (IS_ERR(task))
5188 ret = PTR_ERR(task); 5233 ret = PTR_ERR(task);
5189 else 5234 else
5190 rpc_put_task(task); 5235 rpc_put_task_async(task);
5191 dprintk("<-- %s status=%d\n", __func__, ret); 5236 dprintk("<-- %s status=%d\n", __func__, ret);
5192 return ret; 5237 return ret;
5193} 5238}
@@ -5203,8 +5248,13 @@ static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
5203 goto out; 5248 goto out;
5204 } 5249 }
5205 ret = rpc_wait_for_completion_task(task); 5250 ret = rpc_wait_for_completion_task(task);
5206 if (!ret) 5251 if (!ret) {
5252 struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
5253
5254 if (task->tk_status == 0)
5255 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
5207 ret = task->tk_status; 5256 ret = task->tk_status;
5257 }
5208 rpc_put_task(task); 5258 rpc_put_task(task);
5209out: 5259out:
5210 dprintk("<-- %s status=%d\n", __func__, ret); 5260 dprintk("<-- %s status=%d\n", __func__, ret);
@@ -5241,7 +5291,7 @@ static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nf
5241 rpc_delay(task, NFS4_POLL_RETRY_MAX); 5291 rpc_delay(task, NFS4_POLL_RETRY_MAX);
5242 return -EAGAIN; 5292 return -EAGAIN;
5243 default: 5293 default:
5244 nfs4_schedule_state_recovery(clp); 5294 nfs4_schedule_lease_recovery(clp);
5245 } 5295 }
5246 return 0; 5296 return 0;
5247} 5297}
@@ -5309,6 +5359,9 @@ static int nfs41_proc_reclaim_complete(struct nfs_client *clp)
5309 status = PTR_ERR(task); 5359 status = PTR_ERR(task);
5310 goto out; 5360 goto out;
5311 } 5361 }
5362 status = nfs4_wait_for_completion_rpc_task(task);
5363 if (status == 0)
5364 status = task->tk_status;
5312 rpc_put_task(task); 5365 rpc_put_task(task);
5313 return 0; 5366 return 0;
5314out: 5367out:
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index e6742b57a04c..0592288f9f06 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1007,9 +1007,9 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
1007} 1007}
1008 1008
1009/* 1009/*
1010 * Schedule a state recovery attempt 1010 * Schedule a lease recovery attempt
1011 */ 1011 */
1012void nfs4_schedule_state_recovery(struct nfs_client *clp) 1012void nfs4_schedule_lease_recovery(struct nfs_client *clp)
1013{ 1013{
1014 if (!clp) 1014 if (!clp)
1015 return; 1015 return;
@@ -1018,7 +1018,7 @@ void nfs4_schedule_state_recovery(struct nfs_client *clp)
1018 nfs4_schedule_state_manager(clp); 1018 nfs4_schedule_state_manager(clp);
1019} 1019}
1020 1020
1021int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state) 1021static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
1022{ 1022{
1023 1023
1024 set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags); 1024 set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
@@ -1032,7 +1032,7 @@ int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *st
1032 return 1; 1032 return 1;
1033} 1033}
1034 1034
1035int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state) 1035static int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state)
1036{ 1036{
1037 set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags); 1037 set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags);
1038 clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags); 1038 clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
@@ -1041,6 +1041,14 @@ int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *s
1041 return 1; 1041 return 1;
1042} 1042}
1043 1043
1044void nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4_state *state)
1045{
1046 struct nfs_client *clp = server->nfs_client;
1047
1048 nfs4_state_mark_reclaim_nograce(clp, state);
1049 nfs4_schedule_state_manager(clp);
1050}
1051
1044static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops) 1052static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops)
1045{ 1053{
1046 struct inode *inode = state->inode; 1054 struct inode *inode = state->inode;
@@ -1436,10 +1444,15 @@ static int nfs4_reclaim_lease(struct nfs_client *clp)
1436} 1444}
1437 1445
1438#ifdef CONFIG_NFS_V4_1 1446#ifdef CONFIG_NFS_V4_1
1447void nfs4_schedule_session_recovery(struct nfs4_session *session)
1448{
1449 nfs4_schedule_lease_recovery(session->clp);
1450}
1451
1439void nfs41_handle_recall_slot(struct nfs_client *clp) 1452void nfs41_handle_recall_slot(struct nfs_client *clp)
1440{ 1453{
1441 set_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state); 1454 set_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
1442 nfs4_schedule_state_recovery(clp); 1455 nfs4_schedule_state_manager(clp);
1443} 1456}
1444 1457
1445static void nfs4_reset_all_state(struct nfs_client *clp) 1458static void nfs4_reset_all_state(struct nfs_client *clp)
@@ -1447,7 +1460,7 @@ static void nfs4_reset_all_state(struct nfs_client *clp)
1447 if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) { 1460 if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
1448 clp->cl_boot_time = CURRENT_TIME; 1461 clp->cl_boot_time = CURRENT_TIME;
1449 nfs4_state_start_reclaim_nograce(clp); 1462 nfs4_state_start_reclaim_nograce(clp);
1450 nfs4_schedule_state_recovery(clp); 1463 nfs4_schedule_state_manager(clp);
1451 } 1464 }
1452} 1465}
1453 1466
@@ -1455,7 +1468,7 @@ static void nfs41_handle_server_reboot(struct nfs_client *clp)
1455{ 1468{
1456 if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) { 1469 if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
1457 nfs4_state_start_reclaim_reboot(clp); 1470 nfs4_state_start_reclaim_reboot(clp);
1458 nfs4_schedule_state_recovery(clp); 1471 nfs4_schedule_state_manager(clp);
1459 } 1472 }
1460} 1473}
1461 1474
@@ -1475,7 +1488,7 @@ static void nfs41_handle_cb_path_down(struct nfs_client *clp)
1475{ 1488{
1476 nfs_expire_all_delegations(clp); 1489 nfs_expire_all_delegations(clp);
1477 if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0) 1490 if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0)
1478 nfs4_schedule_state_recovery(clp); 1491 nfs4_schedule_state_manager(clp);
1479} 1492}
1480 1493
1481void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags) 1494void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 4e2c168b6ee9..94d50e86a124 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -1660,7 +1660,7 @@ static void encode_create_session(struct xdr_stream *xdr,
1660 1660
1661 p = reserve_space(xdr, 20 + 2*28 + 20 + len + 12); 1661 p = reserve_space(xdr, 20 + 2*28 + 20 + len + 12);
1662 *p++ = cpu_to_be32(OP_CREATE_SESSION); 1662 *p++ = cpu_to_be32(OP_CREATE_SESSION);
1663 p = xdr_encode_hyper(p, clp->cl_ex_clid); 1663 p = xdr_encode_hyper(p, clp->cl_clientid);
1664 *p++ = cpu_to_be32(clp->cl_seqid); /*Sequence id */ 1664 *p++ = cpu_to_be32(clp->cl_seqid); /*Sequence id */
1665 *p++ = cpu_to_be32(args->flags); /*flags */ 1665 *p++ = cpu_to_be32(args->flags); /*flags */
1666 1666
@@ -4694,7 +4694,7 @@ static int decode_exchange_id(struct xdr_stream *xdr,
4694 p = xdr_inline_decode(xdr, 8); 4694 p = xdr_inline_decode(xdr, 8);
4695 if (unlikely(!p)) 4695 if (unlikely(!p))
4696 goto out_overflow; 4696 goto out_overflow;
4697 xdr_decode_hyper(p, &clp->cl_ex_clid); 4697 xdr_decode_hyper(p, &clp->cl_clientid);
4698 p = xdr_inline_decode(xdr, 12); 4698 p = xdr_inline_decode(xdr, 12);
4699 if (unlikely(!p)) 4699 if (unlikely(!p))
4700 goto out_overflow; 4700 goto out_overflow;
diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c
index 903908a20023..c541093a5bf2 100644
--- a/fs/nfs/nfsroot.c
+++ b/fs/nfs/nfsroot.c
@@ -86,11 +86,14 @@
86/* Default path we try to mount. "%s" gets replaced by our IP address */ 86/* Default path we try to mount. "%s" gets replaced by our IP address */
87#define NFS_ROOT "/tftpboot/%s" 87#define NFS_ROOT "/tftpboot/%s"
88 88
89/* Default NFSROOT mount options. */
90#define NFS_DEF_OPTIONS "udp"
91
89/* Parameters passed from the kernel command line */ 92/* Parameters passed from the kernel command line */
90static char nfs_root_parms[256] __initdata = ""; 93static char nfs_root_parms[256] __initdata = "";
91 94
92/* Text-based mount options passed to super.c */ 95/* Text-based mount options passed to super.c */
93static char nfs_root_options[256] __initdata = ""; 96static char nfs_root_options[256] __initdata = NFS_DEF_OPTIONS;
94 97
95/* Address of NFS server */ 98/* Address of NFS server */
96static __be32 servaddr __initdata = htonl(INADDR_NONE); 99static __be32 servaddr __initdata = htonl(INADDR_NONE);
@@ -160,8 +163,14 @@ static int __init root_nfs_copy(char *dest, const char *src,
160} 163}
161 164
162static int __init root_nfs_cat(char *dest, const char *src, 165static int __init root_nfs_cat(char *dest, const char *src,
163 const size_t destlen) 166 const size_t destlen)
164{ 167{
168 size_t len = strlen(dest);
169
170 if (len && dest[len - 1] != ',')
171 if (strlcat(dest, ",", destlen) > destlen)
172 return -1;
173
165 if (strlcat(dest, src, destlen) > destlen) 174 if (strlcat(dest, src, destlen) > destlen)
166 return -1; 175 return -1;
167 return 0; 176 return 0;
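root_nfs_cat() now inserts the comma separator itself, only when the destination is non-empty and does not already end in one, and treats an over-long strlcat() result as an error. A userspace sketch of the same idea, not part of the patch; strlcat() may not be in the C library, so a small truncating fallback is included, and all names are illustrative.

#include <stdio.h>
#include <string.h>

/* strlcat()-style helper: appends with truncation and returns the length
 * the untruncated string would have had. */
static size_t cat_truncated(char *dst, const char *src, size_t size)
{
	size_t dlen = strlen(dst);
	size_t slen = strlen(src);

	if (dlen < size)
		snprintf(dst + dlen, size - dlen, "%s", src);
	return dlen + slen;
}

/* Append src to a comma-separated option string, adding the ',' only
 * when dest is non-empty and does not already end with one. */
static int options_cat(char *dest, const char *src, size_t destlen)
{
	size_t len = strlen(dest);

	if (len && dest[len - 1] != ',')
		if (cat_truncated(dest, ",", destlen) >= destlen)
			return -1;	/* result would have been truncated */
	if (cat_truncated(dest, src, destlen) >= destlen)
		return -1;
	return 0;
}

int main(void)
{
	char opts[64] = "udp";

	options_cat(opts, "vers=3", sizeof(opts));
	options_cat(opts, "nolock,addr=10.0.0.1", sizeof(opts));
	printf("%s\n", opts);	/* udp,vers=3,nolock,addr=10.0.0.1 */
	return 0;
}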
@@ -194,16 +203,6 @@ static int __init root_nfs_parse_options(char *incoming, char *exppath,
194 if (root_nfs_cat(nfs_root_options, incoming, 203 if (root_nfs_cat(nfs_root_options, incoming,
195 sizeof(nfs_root_options))) 204 sizeof(nfs_root_options)))
196 return -1; 205 return -1;
197
198 /*
199 * Possibly prepare for more options to be appended
200 */
201 if (nfs_root_options[0] != '\0' &&
202 nfs_root_options[strlen(nfs_root_options)] != ',')
203 if (root_nfs_cat(nfs_root_options, ",",
204 sizeof(nfs_root_options)))
205 return -1;
206
207 return 0; 206 return 0;
208} 207}
209 208
@@ -217,7 +216,7 @@ static int __init root_nfs_parse_options(char *incoming, char *exppath,
217 */ 216 */
218static int __init root_nfs_data(char *cmdline) 217static int __init root_nfs_data(char *cmdline)
219{ 218{
220 char addr_option[sizeof("nolock,addr=") + INET_ADDRSTRLEN + 1]; 219 char mand_options[sizeof("nolock,addr=") + INET_ADDRSTRLEN + 1];
221 int len, retval = -1; 220 int len, retval = -1;
222 char *tmp = NULL; 221 char *tmp = NULL;
223 const size_t tmplen = sizeof(nfs_export_path); 222 const size_t tmplen = sizeof(nfs_export_path);
@@ -244,9 +243,9 @@ static int __init root_nfs_data(char *cmdline)
244 * Append mandatory options for nfsroot so they override 243 * Append mandatory options for nfsroot so they override
245 * what has come before 244 * what has come before
246 */ 245 */
247 snprintf(addr_option, sizeof(addr_option), "nolock,addr=%pI4", 246 snprintf(mand_options, sizeof(mand_options), "nolock,addr=%pI4",
248 &servaddr); 247 &servaddr);
249 if (root_nfs_cat(nfs_root_options, addr_option, 248 if (root_nfs_cat(nfs_root_options, mand_options,
250 sizeof(nfs_root_options))) 249 sizeof(nfs_root_options)))
251 goto out_optionstoolong; 250 goto out_optionstoolong;
252 251
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index e313a51acdd1..6481d537d69d 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -180,7 +180,7 @@ static int nfs_do_call_unlink(struct dentry *parent, struct inode *dir, struct n
180 task_setup_data.rpc_client = NFS_CLIENT(dir); 180 task_setup_data.rpc_client = NFS_CLIENT(dir);
181 task = rpc_run_task(&task_setup_data); 181 task = rpc_run_task(&task_setup_data);
182 if (!IS_ERR(task)) 182 if (!IS_ERR(task))
183 rpc_put_task(task); 183 rpc_put_task_async(task);
184 return 1; 184 return 1;
185} 185}
186 186
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index c8278f4046cb..42b92d7a9cc4 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1292,6 +1292,8 @@ static int nfs_commit_rpcsetup(struct list_head *head,
1292 task = rpc_run_task(&task_setup_data); 1292 task = rpc_run_task(&task_setup_data);
1293 if (IS_ERR(task)) 1293 if (IS_ERR(task))
1294 return PTR_ERR(task); 1294 return PTR_ERR(task);
1295 if (how & FLUSH_SYNC)
1296 rpc_wait_for_completion_task(task);
1295 rpc_put_task(task); 1297 rpc_put_task(task);
1296 return 0; 1298 return 0;
1297} 1299}
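The fs/nfs/write.c hunk above makes nfs_commit_rpcsetup() wait for the commit RPC to complete when the caller passed FLUSH_SYNC, instead of returning while the task is still in flight. A loose userspace analogue, with a pthread standing in for the rpc_task; the helper names are illustrative only.

#include <pthread.h>
#include <stdio.h>

#define FLUSH_SYNC 1

/* Stand-in for the commit task body. */
static void *commit_worker(void *arg)
{
	(void)arg;
	/* ... send the COMMIT and record the result ... */
	return NULL;
}

/* Start the commit; block for its completion only when FLUSH_SYNC is set. */
static int commit_rpcsetup(int how)
{
	pthread_t task;

	if (pthread_create(&task, NULL, commit_worker, NULL))
		return -1;
	if (how & FLUSH_SYNC)
		pthread_join(task, NULL);	/* synchronous caller waits here */
	else
		pthread_detach(task);		/* async caller lets it finish alone */
	return 0;
}

int main(void)
{
	printf("sync commit -> %d\n", commit_rpcsetup(FLUSH_SYNC));
	return 0;
}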
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index cde36cb0f348..02eb4edf0ece 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -432,7 +432,7 @@ static int decode_cb_sequence4resok(struct xdr_stream *xdr,
432 * If the server returns different values for sessionID, slotID or 432 * If the server returns different values for sessionID, slotID or
433 * sequence number, the server is looney tunes. 433 * sequence number, the server is looney tunes.
434 */ 434 */
435 p = xdr_inline_decode(xdr, NFS4_MAX_SESSIONID_LEN + 4 + 4); 435 p = xdr_inline_decode(xdr, NFS4_MAX_SESSIONID_LEN + 4 + 4 + 4 + 4);
436 if (unlikely(p == NULL)) 436 if (unlikely(p == NULL))
437 goto out_overflow; 437 goto out_overflow;
438 memcpy(id.data, p, NFS4_MAX_SESSIONID_LEN); 438 memcpy(id.data, p, NFS4_MAX_SESSIONID_LEN);
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 54b60bfceb8d..7b566ec14e18 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -2445,15 +2445,16 @@ nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
2445static struct nfs4_delegation * 2445static struct nfs4_delegation *
2446find_delegation_file(struct nfs4_file *fp, stateid_t *stid) 2446find_delegation_file(struct nfs4_file *fp, stateid_t *stid)
2447{ 2447{
2448 struct nfs4_delegation *dp = NULL; 2448 struct nfs4_delegation *dp;
2449 2449
2450 spin_lock(&recall_lock); 2450 spin_lock(&recall_lock);
2451 list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) { 2451 list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
2452 if (dp->dl_stateid.si_stateownerid == stid->si_stateownerid) 2452 if (dp->dl_stateid.si_stateownerid == stid->si_stateownerid) {
2453 break; 2453 spin_unlock(&recall_lock);
2454 } 2454 return dp;
2455 }
2455 spin_unlock(&recall_lock); 2456 spin_unlock(&recall_lock);
2456 return dp; 2457 return NULL;
2457} 2458}
2458 2459
2459int share_access_to_flags(u32 share_access) 2460int share_access_to_flags(u32 share_access)
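The find_delegation_file() rewrite above returns the matching delegation from inside the locked list walk, rather than breaking out of list_for_each_entry() and returning the iteration cursor, which does not point at a valid entry once the whole list has been traversed without a match. A userspace sketch of the same search-under-lock pattern, using a pthread mutex and a plain singly linked list; all names are illustrative.

#include <pthread.h>
#include <stdio.h>

struct delegation {
	int id;
	struct delegation *next;
};

static pthread_mutex_t recall_lock = PTHREAD_MUTEX_INITIALIZER;
static struct delegation *delegations;	/* list head, protected by recall_lock */

/* Return the matching entry or NULL; never a stale iteration cursor. */
static struct delegation *find_delegation(int id)
{
	struct delegation *dp;

	pthread_mutex_lock(&recall_lock);
	for (dp = delegations; dp != NULL; dp = dp->next) {
		if (dp->id == id) {
			pthread_mutex_unlock(&recall_lock);
			return dp;
		}
	}
	pthread_mutex_unlock(&recall_lock);
	return NULL;
}

int main(void)
{
	struct delegation d2 = { .id = 2, .next = NULL };
	struct delegation d1 = { .id = 1, .next = &d2 };

	delegations = &d1;
	printf("id 2 %s\n", find_delegation(2) ? "found" : "missing");
	printf("id 9 %s\n", find_delegation(9) ? "found" : "missing");
	return 0;
}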
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 1275b8655070..615f0a9f0600 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1142,7 +1142,7 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp,
1142 1142
1143 u32 dummy; 1143 u32 dummy;
1144 char *machine_name; 1144 char *machine_name;
1145 int i; 1145 int i, j;
1146 int nr_secflavs; 1146 int nr_secflavs;
1147 1147
1148 READ_BUF(16); 1148 READ_BUF(16);
@@ -1215,7 +1215,7 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp,
1215 READ_BUF(4); 1215 READ_BUF(4);
1216 READ32(dummy); 1216 READ32(dummy);
1217 READ_BUF(dummy * 4); 1217 READ_BUF(dummy * 4);
1218 for (i = 0; i < dummy; ++i) 1218 for (j = 0; j < dummy; ++j)
1219 READ32(dummy); 1219 READ32(dummy);
1220 break; 1220 break;
1221 case RPC_AUTH_GSS: 1221 case RPC_AUTH_GSS:
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index 98034271cd02..161791d26458 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -397,7 +397,6 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
397 new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, &new_page); 397 new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, &new_page);
398 if (!new_de) 398 if (!new_de)
399 goto out_dir; 399 goto out_dir;
400 inc_nlink(old_inode);
401 nilfs_set_link(new_dir, new_de, new_page, old_inode); 400 nilfs_set_link(new_dir, new_de, new_page, old_inode);
402 nilfs_mark_inode_dirty(new_dir); 401 nilfs_mark_inode_dirty(new_dir);
403 new_inode->i_ctime = CURRENT_TIME; 402 new_inode->i_ctime = CURRENT_TIME;
@@ -411,13 +410,9 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
411 if (new_dir->i_nlink >= NILFS_LINK_MAX) 410 if (new_dir->i_nlink >= NILFS_LINK_MAX)
412 goto out_dir; 411 goto out_dir;
413 } 412 }
414 inc_nlink(old_inode);
415 err = nilfs_add_link(new_dentry, old_inode); 413 err = nilfs_add_link(new_dentry, old_inode);
416 if (err) { 414 if (err)
417 drop_nlink(old_inode);
418 nilfs_mark_inode_dirty(old_inode);
419 goto out_dir; 415 goto out_dir;
420 }
421 if (dir_de) { 416 if (dir_de) {
422 inc_nlink(new_dir); 417 inc_nlink(new_dir);
423 nilfs_mark_inode_dirty(new_dir); 418 nilfs_mark_inode_dirty(new_dir);
@@ -431,7 +426,6 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
431 old_inode->i_ctime = CURRENT_TIME; 426 old_inode->i_ctime = CURRENT_TIME;
432 427
433 nilfs_delete_entry(old_de, old_page); 428 nilfs_delete_entry(old_de, old_page);
434 drop_nlink(old_inode);
435 429
436 if (dir_de) { 430 if (dir_de) {
437 nilfs_set_link(old_inode, dir_de, dir_page, new_dir); 431 nilfs_set_link(old_inode, dir_de, dir_page, new_dir);
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 55ebae5c7f39..2de9f636792a 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -430,7 +430,8 @@ static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
430 nilfs_segctor_map_segsum_entry( 430 nilfs_segctor_map_segsum_entry(
431 sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo)); 431 sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));
432 432
433 if (inode->i_sb && !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags)) 433 if (NILFS_I(inode)->i_root &&
434 !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
434 set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags); 435 set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
435 /* skip finfo */ 436 /* skip finfo */
436} 437}
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
index 6d80ecc7834f..7eb90403fc8a 100644
--- a/fs/ocfs2/dcache.c
+++ b/fs/ocfs2/dcache.c
@@ -56,7 +56,7 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry,
56 int ret = 0; /* if all else fails, just return false */ 56 int ret = 0; /* if all else fails, just return false */
57 struct ocfs2_super *osb; 57 struct ocfs2_super *osb;
58 58
59 if (nd->flags & LOOKUP_RCU) 59 if (nd && nd->flags & LOOKUP_RCU)
60 return -ECHILD; 60 return -ECHILD;
61 61
62 inode = dentry->d_inode; 62 inode = dentry->d_inode;
diff --git a/fs/open.c b/fs/open.c
index 5a2c6ebc22b5..b47aab39c057 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -233,6 +233,14 @@ int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
233 233
234 if (!(file->f_mode & FMODE_WRITE)) 234 if (!(file->f_mode & FMODE_WRITE))
235 return -EBADF; 235 return -EBADF;
236
237 /* It's not possible punch hole on append only file */
238 if (mode & FALLOC_FL_PUNCH_HOLE && IS_APPEND(inode))
239 return -EPERM;
240
241 if (IS_IMMUTABLE(inode))
242 return -EPERM;
243
236 /* 244 /*
237 * Revalidate the write permissions, in case security policy has 245 * Revalidate the write permissions, in case security policy has
238 * changed since the files were opened. 246 * changed since the files were opened.
diff --git a/fs/partitions/osf.c b/fs/partitions/osf.c
index 48cec7cbca17..be03a0b08b47 100644
--- a/fs/partitions/osf.c
+++ b/fs/partitions/osf.c
@@ -10,10 +10,13 @@
10#include "check.h" 10#include "check.h"
11#include "osf.h" 11#include "osf.h"
12 12
13#define MAX_OSF_PARTITIONS 8
14
13int osf_partition(struct parsed_partitions *state) 15int osf_partition(struct parsed_partitions *state)
14{ 16{
15 int i; 17 int i;
16 int slot = 1; 18 int slot = 1;
19 unsigned int npartitions;
17 Sector sect; 20 Sector sect;
18 unsigned char *data; 21 unsigned char *data;
19 struct disklabel { 22 struct disklabel {
@@ -45,7 +48,7 @@ int osf_partition(struct parsed_partitions *state)
45 u8 p_fstype; 48 u8 p_fstype;
46 u8 p_frag; 49 u8 p_frag;
47 __le16 p_cpg; 50 __le16 p_cpg;
48 } d_partitions[8]; 51 } d_partitions[MAX_OSF_PARTITIONS];
49 } * label; 52 } * label;
50 struct d_partition * partition; 53 struct d_partition * partition;
51 54
@@ -63,7 +66,12 @@ int osf_partition(struct parsed_partitions *state)
63 put_dev_sector(sect); 66 put_dev_sector(sect);
64 return 0; 67 return 0;
65 } 68 }
66 for (i = 0 ; i < le16_to_cpu(label->d_npartitions); i++, partition++) { 69 npartitions = le16_to_cpu(label->d_npartitions);
70 if (npartitions > MAX_OSF_PARTITIONS) {
71 put_dev_sector(sect);
72 return 0;
73 }
74 for (i = 0 ; i < npartitions; i++, partition++) {
67 if (slot == state->limit) 75 if (slot == state->limit)
68 break; 76 break;
69 if (le32_to_cpu(partition->p_size)) 77 if (le32_to_cpu(partition->p_size))
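The OSF partition parser now reads d_npartitions once, rejects any label claiming more than MAX_OSF_PARTITIONS entries, and only then walks the fixed-size d_partitions[] array, so a corrupt label can no longer drive the loop past the end of the array. A condensed userspace sketch of validating an untrusted on-disk count before indexing; the structure layout is simplified and the names are illustrative.

#include <stdint.h>
#include <stdio.h>

#define MAX_OSF_PARTITIONS 8

struct d_partition {
	uint32_t p_size;
	uint32_t p_offset;
};

struct disklabel {
	uint16_t d_npartitions;	/* untrusted: read straight from the disk */
	struct d_partition d_partitions[MAX_OSF_PARTITIONS];
};

/* Returns the number of partitions handled, or -1 for a bogus label. */
static int parse_label(const struct disklabel *label)
{
	unsigned int npartitions = label->d_npartitions;
	unsigned int i;

	if (npartitions > MAX_OSF_PARTITIONS)
		return -1;	/* never index past d_partitions[] */

	for (i = 0; i < npartitions; i++)
		printf("partition %u: %u sectors at %u\n", i,
		       label->d_partitions[i].p_size,
		       label->d_partitions[i].p_offset);
	return (int)npartitions;
}

int main(void)
{
	struct disklabel label = {
		.d_npartitions = 2,
		.d_partitions = { { 1024, 0 }, { 2048, 1024 } },
	};

	return parse_label(&label) < 0;
}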
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 9d096e82b201..d49c4b5d2c3e 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2620,35 +2620,6 @@ static const struct pid_entry proc_base_stuff[] = {
2620 &proc_self_inode_operations, NULL, {}), 2620 &proc_self_inode_operations, NULL, {}),
2621}; 2621};
2622 2622
2623/*
2624 * Exceptional case: normally we are not allowed to unhash a busy
2625 * directory. In this case, however, we can do it - no aliasing problems
2626 * due to the way we treat inodes.
2627 */
2628static int proc_base_revalidate(struct dentry *dentry, struct nameidata *nd)
2629{
2630 struct inode *inode;
2631 struct task_struct *task;
2632
2633 if (nd->flags & LOOKUP_RCU)
2634 return -ECHILD;
2635
2636 inode = dentry->d_inode;
2637 task = get_proc_task(inode);
2638 if (task) {
2639 put_task_struct(task);
2640 return 1;
2641 }
2642 d_drop(dentry);
2643 return 0;
2644}
2645
2646static const struct dentry_operations proc_base_dentry_operations =
2647{
2648 .d_revalidate = proc_base_revalidate,
2649 .d_delete = pid_delete_dentry,
2650};
2651
2652static struct dentry *proc_base_instantiate(struct inode *dir, 2623static struct dentry *proc_base_instantiate(struct inode *dir,
2653 struct dentry *dentry, struct task_struct *task, const void *ptr) 2624 struct dentry *dentry, struct task_struct *task, const void *ptr)
2654{ 2625{
@@ -2685,7 +2656,6 @@ static struct dentry *proc_base_instantiate(struct inode *dir,
2685 if (p->fop) 2656 if (p->fop)
2686 inode->i_fop = p->fop; 2657 inode->i_fop = p->fop;
2687 ei->op = p->op; 2658 ei->op = p->op;
2688 d_set_d_op(dentry, &proc_base_dentry_operations);
2689 d_add(dentry, inode); 2659 d_add(dentry, inode);
2690 error = NULL; 2660 error = NULL;
2691out: 2661out:
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 176ce4cda68a..d6a7ca1fdac5 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -27,6 +27,7 @@
27static void proc_evict_inode(struct inode *inode) 27static void proc_evict_inode(struct inode *inode)
28{ 28{
29 struct proc_dir_entry *de; 29 struct proc_dir_entry *de;
30 struct ctl_table_header *head;
30 31
31 truncate_inode_pages(&inode->i_data, 0); 32 truncate_inode_pages(&inode->i_data, 0);
32 end_writeback(inode); 33 end_writeback(inode);
@@ -38,8 +39,11 @@ static void proc_evict_inode(struct inode *inode)
38 de = PROC_I(inode)->pde; 39 de = PROC_I(inode)->pde;
39 if (de) 40 if (de)
40 pde_put(de); 41 pde_put(de);
41 if (PROC_I(inode)->sysctl) 42 head = PROC_I(inode)->sysctl;
42 sysctl_head_put(PROC_I(inode)->sysctl); 43 if (head) {
44 rcu_assign_pointer(PROC_I(inode)->sysctl, NULL);
45 sysctl_head_put(head);
46 }
43} 47}
44 48
45struct vfsmount *proc_mnt; 49struct vfsmount *proc_mnt;
diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c
index d9396a4fc7ff..927cbd115e53 100644
--- a/fs/proc/proc_devtree.c
+++ b/fs/proc/proc_devtree.c
@@ -233,7 +233,7 @@ void __init proc_device_tree_init(void)
233 return; 233 return;
234 root = of_find_node_by_path("/"); 234 root = of_find_node_by_path("/");
235 if (root == NULL) { 235 if (root == NULL) {
236 printk(KERN_ERR "/proc/device-tree: can't find root\n"); 236 pr_debug("/proc/device-tree: can't find root\n");
237 return; 237 return;
238 } 238 }
239 proc_device_tree_add_node(root, proc_device_tree); 239 proc_device_tree_add_node(root, proc_device_tree);
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 09a1f92a34ef..8eb2522111c5 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -408,15 +408,18 @@ static int proc_sys_compare(const struct dentry *parent,
408 const struct dentry *dentry, const struct inode *inode, 408 const struct dentry *dentry, const struct inode *inode,
409 unsigned int len, const char *str, const struct qstr *name) 409 unsigned int len, const char *str, const struct qstr *name)
410{ 410{
411 struct ctl_table_header *head;
411 /* Although proc doesn't have negative dentries, rcu-walk means 412 /* Although proc doesn't have negative dentries, rcu-walk means
412 * that inode here can be NULL */ 413 * that inode here can be NULL */
414 /* AV: can it, indeed? */
413 if (!inode) 415 if (!inode)
414 return 0; 416 return 1;
415 if (name->len != len) 417 if (name->len != len)
416 return 1; 418 return 1;
417 if (memcmp(name->name, str, len)) 419 if (memcmp(name->name, str, len))
418 return 1; 420 return 1;
419 return !sysctl_is_seen(PROC_I(inode)->sysctl); 421 head = rcu_dereference(PROC_I(inode)->sysctl);
422 return !head || !sysctl_is_seen(head);
420} 423}
421 424
422static const struct dentry_operations proc_sys_dentry_operations = { 425static const struct dentry_operations proc_sys_dentry_operations = {
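Taken together, the proc changes above clear the inode's ->sysctl pointer with rcu_assign_pointer() during eviction and make proc_sys_compare() re-read it with rcu_dereference() and tolerate NULL, so a lockless rcu-walk lookup racing with eviction never follows a stale header. A loose userspace analogue using C11 release/acquire atomics in place of the RCU primitives; real RCU also defers the final free past a grace period, which this sketch only notes in a comment, and every name here is invented for illustration.

#include <stdatomic.h>
#include <stdio.h>

struct ctl_head {
	const char *name;
};

/* Shared pointer that lockless readers may follow at any time. */
static _Atomic(struct ctl_head *) published_head;

/* Teardown side: unpublish the pointer before tearing the object down. */
static void evict_head(void)
{
	atomic_store_explicit(&published_head, NULL, memory_order_release);
	/* real code would wait out a grace period, then put/free the old head */
}

/* Lookup side: re-read the pointer every time and cope with NULL. */
static int head_is_visible(void)
{
	struct ctl_head *head =
		atomic_load_explicit(&published_head, memory_order_acquire);

	return head != NULL;
}

int main(void)
{
	static struct ctl_head head = { .name = "kernel" };

	atomic_store_explicit(&published_head, &head, memory_order_release);
	printf("visible=%d\n", head_is_visible());
	evict_head();
	printf("visible=%d\n", head_is_visible());
	return 0;
}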
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index ba5f51ec3458..68fdf45cc6c9 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -771,7 +771,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
771 EMPTY_DIR_SIZE_V1 : EMPTY_DIR_SIZE, 771 EMPTY_DIR_SIZE_V1 : EMPTY_DIR_SIZE,
772 dentry, inode, &security); 772 dentry, inode, &security);
773 if (retval) { 773 if (retval) {
774 dir->i_nlink--; 774 DEC_DIR_INODE_NLINK(dir)
775 goto out_failed; 775 goto out_failed;
776 } 776 }
777 777
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 3cfb2e933644..5c11ca82b782 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -978,8 +978,6 @@ int reiserfs_permission(struct inode *inode, int mask, unsigned int flags)
978 978
979static int xattr_hide_revalidate(struct dentry *dentry, struct nameidata *nd) 979static int xattr_hide_revalidate(struct dentry *dentry, struct nameidata *nd)
980{ 980{
981 if (nd->flags & LOOKUP_RCU)
982 return -ECHILD;
983 return -EPERM; 981 return -EPERM;
984} 982}
985 983
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c
index b427b1208c26..e474fbcf8bde 100644
--- a/fs/sysv/namei.c
+++ b/fs/sysv/namei.c
@@ -245,7 +245,6 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry,
245 new_de = sysv_find_entry(new_dentry, &new_page); 245 new_de = sysv_find_entry(new_dentry, &new_page);
246 if (!new_de) 246 if (!new_de)
247 goto out_dir; 247 goto out_dir;
248 inode_inc_link_count(old_inode);
249 sysv_set_link(new_de, new_page, old_inode); 248 sysv_set_link(new_de, new_page, old_inode);
250 new_inode->i_ctime = CURRENT_TIME_SEC; 249 new_inode->i_ctime = CURRENT_TIME_SEC;
251 if (dir_de) 250 if (dir_de)
@@ -257,18 +256,15 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry,
257 if (new_dir->i_nlink >= SYSV_SB(new_dir->i_sb)->s_link_max) 256 if (new_dir->i_nlink >= SYSV_SB(new_dir->i_sb)->s_link_max)
258 goto out_dir; 257 goto out_dir;
259 } 258 }
260 inode_inc_link_count(old_inode);
261 err = sysv_add_link(new_dentry, old_inode); 259 err = sysv_add_link(new_dentry, old_inode);
262 if (err) { 260 if (err)
263 inode_dec_link_count(old_inode);
264 goto out_dir; 261 goto out_dir;
265 }
266 if (dir_de) 262 if (dir_de)
267 inode_inc_link_count(new_dir); 263 inode_inc_link_count(new_dir);
268 } 264 }
269 265
270 sysv_delete_entry(old_de, old_page); 266 sysv_delete_entry(old_de, old_page);
271 inode_dec_link_count(old_inode); 267 mark_inode_dirty(old_inode);
272 268
273 if (dir_de) { 269 if (dir_de) {
274 sysv_set_link(dir_de, dir_page, new_dir); 270 sysv_set_link(dir_de, dir_page, new_dir);
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 2be0f9eb86d2..b7c338d5e9df 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -32,6 +32,8 @@
32#include <linux/crc-itu-t.h> 32#include <linux/crc-itu-t.h>
33#include <linux/exportfs.h> 33#include <linux/exportfs.h>
34 34
35enum { UDF_MAX_LINKS = 0xffff };
36
35static inline int udf_match(int len1, const unsigned char *name1, int len2, 37static inline int udf_match(int len1, const unsigned char *name1, int len2,
36 const unsigned char *name2) 38 const unsigned char *name2)
37{ 39{
@@ -650,7 +652,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, int mode)
650 struct udf_inode_info *iinfo; 652 struct udf_inode_info *iinfo;
651 653
652 err = -EMLINK; 654 err = -EMLINK;
653 if (dir->i_nlink >= (256 << sizeof(dir->i_nlink)) - 1) 655 if (dir->i_nlink >= UDF_MAX_LINKS)
654 goto out; 656 goto out;
655 657
656 err = -EIO; 658 err = -EIO;
@@ -1034,9 +1036,8 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir,
1034 struct fileIdentDesc cfi, *fi; 1036 struct fileIdentDesc cfi, *fi;
1035 int err; 1037 int err;
1036 1038
1037 if (inode->i_nlink >= (256 << sizeof(inode->i_nlink)) - 1) { 1039 if (inode->i_nlink >= UDF_MAX_LINKS)
1038 return -EMLINK; 1040 return -EMLINK;
1039 }
1040 1041
1041 fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err); 1042 fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
1042 if (!fi) { 1043 if (!fi) {
@@ -1131,9 +1132,7 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
1131 goto end_rename; 1132 goto end_rename;
1132 1133
1133 retval = -EMLINK; 1134 retval = -EMLINK;
1134 if (!new_inode && 1135 if (!new_inode && new_dir->i_nlink >= UDF_MAX_LINKS)
1135 new_dir->i_nlink >=
1136 (256 << sizeof(new_dir->i_nlink)) - 1)
1137 goto end_rename; 1136 goto end_rename;
1138 } 1137 }
1139 if (!nfi) { 1138 if (!nfi) {
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index 12f39b9e4437..d6f681535eb8 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -306,7 +306,6 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
306 new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_page); 306 new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_page);
307 if (!new_de) 307 if (!new_de)
308 goto out_dir; 308 goto out_dir;
309 inode_inc_link_count(old_inode);
310 ufs_set_link(new_dir, new_de, new_page, old_inode); 309 ufs_set_link(new_dir, new_de, new_page, old_inode);
311 new_inode->i_ctime = CURRENT_TIME_SEC; 310 new_inode->i_ctime = CURRENT_TIME_SEC;
312 if (dir_de) 311 if (dir_de)
@@ -318,12 +317,9 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
318 if (new_dir->i_nlink >= UFS_LINK_MAX) 317 if (new_dir->i_nlink >= UFS_LINK_MAX)
319 goto out_dir; 318 goto out_dir;
320 } 319 }
321 inode_inc_link_count(old_inode);
322 err = ufs_add_link(new_dentry, old_inode); 320 err = ufs_add_link(new_dentry, old_inode);
323 if (err) { 321 if (err)
324 inode_dec_link_count(old_inode);
325 goto out_dir; 322 goto out_dir;
326 }
327 if (dir_de) 323 if (dir_de)
328 inode_inc_link_count(new_dir); 324 inode_inc_link_count(new_dir);
329 } 325 }
@@ -331,12 +327,11 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
331 /* 327 /*
332 * Like most other Unix systems, set the ctime for inodes on a 328 * Like most other Unix systems, set the ctime for inodes on a
333 * rename. 329 * rename.
334 * inode_dec_link_count() will mark the inode dirty.
335 */ 330 */
336 old_inode->i_ctime = CURRENT_TIME_SEC; 331 old_inode->i_ctime = CURRENT_TIME_SEC;
337 332
338 ufs_delete_entry(old_dir, old_de, old_page); 333 ufs_delete_entry(old_dir, old_de, old_page);
339 inode_dec_link_count(old_inode); 334 mark_inode_dirty(old_inode);
340 335
341 if (dir_de) { 336 if (dir_de) {
342 ufs_set_link(old_inode, dir_de, dir_page, new_dir); 337 ufs_set_link(old_inode, dir_de, dir_page, new_dir);
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index f5e2a19e0f8e..0ca0e3c024d7 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -695,14 +695,19 @@ xfs_ioc_fsgeometry_v1(
695 xfs_mount_t *mp, 695 xfs_mount_t *mp,
696 void __user *arg) 696 void __user *arg)
697{ 697{
698 xfs_fsop_geom_v1_t fsgeo; 698 xfs_fsop_geom_t fsgeo;
699 int error; 699 int error;
700 700
701 error = xfs_fs_geometry(mp, (xfs_fsop_geom_t *)&fsgeo, 3); 701 error = xfs_fs_geometry(mp, &fsgeo, 3);
702 if (error) 702 if (error)
703 return -error; 703 return -error;
704 704
705 if (copy_to_user(arg, &fsgeo, sizeof(fsgeo))) 705 /*
706 * Caller should have passed an argument of type
707 * xfs_fsop_geom_v1_t. This is a proper subset of the
708 * xfs_fsop_geom_t that xfs_fs_geometry() fills in.
709 */
710 if (copy_to_user(arg, &fsgeo, sizeof(xfs_fsop_geom_v1_t)))
706 return -XFS_ERROR(EFAULT); 711 return -XFS_ERROR(EFAULT);
707 return 0; 712 return 0;
708} 713}
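xfs_ioc_fsgeometry_v1() now fills the full xfs_fsop_geom_t and copies only sizeof(xfs_fsop_geom_v1_t) bytes back to user space, relying on the v1 layout being a leading subset of the current one, instead of letting xfs_fs_geometry() write through a pointer to the smaller v1 buffer. A userspace sketch of copying such a prefix-compatible subset; the structure contents are invented for illustration.

#include <stdio.h>
#include <string.h>

/* The v1 layout must remain a leading subset of the current layout. */
struct geom_v1 { unsigned int blocksize, datablocks; };
struct geom    { unsigned int blocksize, datablocks, logblocks, sectsize; };

static void fill_geometry(struct geom *g)
{
	g->blocksize  = 4096;
	g->datablocks = 1 << 20;
	g->logblocks  = 2048;
	g->sectsize   = 512;
}

static void get_geometry_v1(void *user_arg)
{
	struct geom full;

	fill_geometry(&full);		/* always fill the full structure */
	memcpy(user_arg, &full, sizeof(struct geom_v1));	/* copy only the v1 prefix */
}

int main(void)
{
	struct geom_v1 v1;

	get_geometry_v1(&v1);
	printf("blocksize=%u datablocks=%u\n", v1.blocksize, v1.datablocks);
	return 0;
}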
diff --git a/include/keys/rxrpc-type.h b/include/keys/rxrpc-type.h
index 5cb86c307f5d..fc4875433817 100644
--- a/include/keys/rxrpc-type.h
+++ b/include/keys/rxrpc-type.h
@@ -99,7 +99,6 @@ struct rxrpc_key_token {
99 * structure of raw payloads passed to add_key() or instantiate key 99 * structure of raw payloads passed to add_key() or instantiate key
100 */ 100 */
101struct rxrpc_key_data_v1 { 101struct rxrpc_key_data_v1 {
102 u32 kif_version; /* 1 */
103 u16 security_index; 102 u16 security_index;
104 u16 ticket_length; 103 u16 ticket_length;
105 u32 expiry; /* time_t */ 104 u32 expiry; /* time_t */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4d18ff34670a..d5063e1b5555 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -699,7 +699,7 @@ extern void blk_start_queue(struct request_queue *q);
699extern void blk_stop_queue(struct request_queue *q); 699extern void blk_stop_queue(struct request_queue *q);
700extern void blk_sync_queue(struct request_queue *q); 700extern void blk_sync_queue(struct request_queue *q);
701extern void __blk_stop_queue(struct request_queue *q); 701extern void __blk_stop_queue(struct request_queue *q);
702extern void __blk_run_queue(struct request_queue *); 702extern void __blk_run_queue(struct request_queue *q, bool force_kblockd);
703extern void blk_run_queue(struct request_queue *); 703extern void blk_run_queue(struct request_queue *);
704extern int blk_rq_map_user(struct request_queue *, struct request *, 704extern int blk_rq_map_user(struct request_queue *, struct request *,
705 struct rq_map_data *, void __user *, unsigned long, 705 struct rq_map_data *, void __user *, unsigned long,
@@ -1088,7 +1088,6 @@ static inline void put_dev_sector(Sector p)
1088 1088
1089struct work_struct; 1089struct work_struct;
1090int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); 1090int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
1091int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay);
1092 1091
1093#ifdef CONFIG_BLK_CGROUP 1092#ifdef CONFIG_BLK_CGROUP
1094/* 1093/*
@@ -1136,7 +1135,6 @@ static inline uint64_t rq_io_start_time_ns(struct request *req)
1136extern int blk_throtl_init(struct request_queue *q); 1135extern int blk_throtl_init(struct request_queue *q);
1137extern void blk_throtl_exit(struct request_queue *q); 1136extern void blk_throtl_exit(struct request_queue *q);
1138extern int blk_throtl_bio(struct request_queue *q, struct bio **bio); 1137extern int blk_throtl_bio(struct request_queue *q, struct bio **bio);
1139extern void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay);
1140extern void throtl_shutdown_timer_wq(struct request_queue *q); 1138extern void throtl_shutdown_timer_wq(struct request_queue *q);
1141#else /* CONFIG_BLK_DEV_THROTTLING */ 1139#else /* CONFIG_BLK_DEV_THROTTLING */
1142static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio) 1140static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
@@ -1146,7 +1144,6 @@ static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
1146 1144
1147static inline int blk_throtl_init(struct request_queue *q) { return 0; } 1145static inline int blk_throtl_init(struct request_queue *q) { return 0; }
1148static inline int blk_throtl_exit(struct request_queue *q) { return 0; } 1146static inline int blk_throtl_exit(struct request_queue *q) { return 0; }
1149static inline void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) {}
1150static inline void throtl_shutdown_timer_wq(struct request_queue *q) {} 1147static inline void throtl_shutdown_timer_wq(struct request_queue *q) {}
1151#endif /* CONFIG_BLK_DEV_THROTTLING */ 1148#endif /* CONFIG_BLK_DEV_THROTTLING */
1152 1149
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 3395cf7130f5..b22fb0d3db0f 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -245,7 +245,6 @@ static inline int blk_cmd_buf_len(struct request *rq)
245 245
246extern void blk_dump_cmd(char *buf, struct request *rq); 246extern void blk_dump_cmd(char *buf, struct request *rq);
247extern void blk_fill_rwbs(char *rwbs, u32 rw, int bytes); 247extern void blk_fill_rwbs(char *rwbs, u32 rw, int bytes);
248extern void blk_fill_rwbs_rq(char *rwbs, struct request *rq);
249 248
250#endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */ 249#endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */
251 250
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index c3011beac30d..31d91a64838b 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -123,6 +123,7 @@ struct ceph_msg_pos {
123#define SOCK_CLOSED 11 /* socket state changed to closed */ 123#define SOCK_CLOSED 11 /* socket state changed to closed */
124#define OPENING 13 /* open connection w/ (possibly new) peer */ 124#define OPENING 13 /* open connection w/ (possibly new) peer */
125#define DEAD 14 /* dead, about to kfree */ 125#define DEAD 14 /* dead, about to kfree */
126#define BACKOFF 15
126 127
127/* 128/*
128 * A single connection with another host. 129 * A single connection with another host.
@@ -160,7 +161,6 @@ struct ceph_connection {
160 struct list_head out_queue; 161 struct list_head out_queue;
161 struct list_head out_sent; /* sending or sent but unacked */ 162 struct list_head out_sent; /* sending or sent but unacked */
162 u64 out_seq; /* last message queued for send */ 163 u64 out_seq; /* last message queued for send */
163 bool out_keepalive_pending;
164 164
165 u64 in_seq, in_seq_acked; /* last message received, acked */ 165 u64 in_seq, in_seq_acked; /* last message received, acked */
166 166
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 0b84c61607e8..dca31761b311 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -332,16 +332,19 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
332 return alloc_pages_current(gfp_mask, order); 332 return alloc_pages_current(gfp_mask, order);
333} 333}
334extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, 334extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
335 struct vm_area_struct *vma, unsigned long addr); 335 struct vm_area_struct *vma, unsigned long addr,
336 int node);
336#else 337#else
337#define alloc_pages(gfp_mask, order) \ 338#define alloc_pages(gfp_mask, order) \
338 alloc_pages_node(numa_node_id(), gfp_mask, order) 339 alloc_pages_node(numa_node_id(), gfp_mask, order)
339#define alloc_pages_vma(gfp_mask, order, vma, addr) \ 340#define alloc_pages_vma(gfp_mask, order, vma, addr, node) \
340 alloc_pages(gfp_mask, order) 341 alloc_pages(gfp_mask, order)
341#endif 342#endif
342#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) 343#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
343#define alloc_page_vma(gfp_mask, vma, addr) \ 344#define alloc_page_vma(gfp_mask, vma, addr) \
344 alloc_pages_vma(gfp_mask, 0, vma, addr) 345 alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id())
346#define alloc_page_vma_node(gfp_mask, vma, addr, node) \
347 alloc_pages_vma(gfp_mask, 0, vma, addr, node)
345 348
346extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); 349extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
347extern unsigned long get_zeroed_page(gfp_t gfp_mask); 350extern unsigned long get_zeroed_page(gfp_t gfp_mask);
diff --git a/include/linux/mfd/wm8994/core.h b/include/linux/mfd/wm8994/core.h
index 3fd36845ca45..ef4f0b6083a3 100644
--- a/include/linux/mfd/wm8994/core.h
+++ b/include/linux/mfd/wm8994/core.h
@@ -71,6 +71,7 @@ struct wm8994 {
71 u16 irq_masks_cache[WM8994_NUM_IRQ_REGS]; 71 u16 irq_masks_cache[WM8994_NUM_IRQ_REGS];
72 72
73 /* Used over suspend/resume */ 73 /* Used over suspend/resume */
74 bool suspended;
74 u16 ldo_regs[WM8994_NUM_LDO_REGS]; 75 u16 ldo_regs[WM8994_NUM_LDO_REGS];
75 u16 gpio_regs[WM8994_NUM_GPIO_REGS]; 76 u16 gpio_regs[WM8994_NUM_GPIO_REGS];
76 77
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d971346b0340..71caf7a5e6c6 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2392,6 +2392,9 @@ extern int netdev_notice(const struct net_device *dev, const char *format, ...)
2392extern int netdev_info(const struct net_device *dev, const char *format, ...) 2392extern int netdev_info(const struct net_device *dev, const char *format, ...)
2393 __attribute__ ((format (printf, 2, 3))); 2393 __attribute__ ((format (printf, 2, 3)));
2394 2394
2395#define MODULE_ALIAS_NETDEV(device) \
2396 MODULE_ALIAS("netdev-" device)
2397
2395#if defined(DEBUG) 2398#if defined(DEBUG)
2396#define netdev_dbg(__dev, format, args...) \ 2399#define netdev_dbg(__dev, format, args...) \
2397 netdev_printk(KERN_DEBUG, __dev, format, ##args) 2400 netdev_printk(KERN_DEBUG, __dev, format, ##args)
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index b197563913bf..3e112de12d8d 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -68,11 +68,7 @@ struct nfs_client {
68 unsigned char cl_id_uniquifier; 68 unsigned char cl_id_uniquifier;
69 u32 cl_cb_ident; /* v4.0 callback identifier */ 69 u32 cl_cb_ident; /* v4.0 callback identifier */
70 const struct nfs4_minor_version_ops *cl_mvops; 70 const struct nfs4_minor_version_ops *cl_mvops;
71#endif /* CONFIG_NFS_V4 */
72 71
73#ifdef CONFIG_NFS_V4_1
74 /* clientid returned from EXCHANGE_ID, used by session operations */
75 u64 cl_ex_clid;
76 /* The sequence id to use for the next CREATE_SESSION */ 72 /* The sequence id to use for the next CREATE_SESSION */
77 u32 cl_seqid; 73 u32 cl_seqid;
78 /* The flags used for obtaining the clientid during EXCHANGE_ID */ 74 /* The flags used for obtaining the clientid during EXCHANGE_ID */
@@ -80,7 +76,7 @@ struct nfs_client {
80 struct nfs4_session *cl_session; /* sharred session */ 76 struct nfs4_session *cl_session; /* sharred session */
81 struct list_head cl_layouts; 77 struct list_head cl_layouts;
82 struct pnfs_deviceid_cache *cl_devid_cache; /* pNFS deviceid cache */ 78 struct pnfs_deviceid_cache *cl_devid_cache; /* pNFS deviceid cache */
83#endif /* CONFIG_NFS_V4_1 */ 79#endif /* CONFIG_NFS_V4 */
84 80
85#ifdef CONFIG_NFS_FSCACHE 81#ifdef CONFIG_NFS_FSCACHE
86 struct fscache_cookie *fscache; /* client index cache cookie */ 82 struct fscache_cookie *fscache; /* client index cache cookie */
@@ -185,7 +181,7 @@ struct nfs_server {
185/* maximum number of slots to use */ 181/* maximum number of slots to use */
186#define NFS4_MAX_SLOT_TABLE RPC_MAX_SLOT_TABLE 182#define NFS4_MAX_SLOT_TABLE RPC_MAX_SLOT_TABLE
187 183
188#if defined(CONFIG_NFS_V4_1) 184#if defined(CONFIG_NFS_V4)
189 185
190/* Sessions */ 186/* Sessions */
191#define SLOT_TABLE_SZ (NFS4_MAX_SLOT_TABLE/(8*sizeof(long))) 187#define SLOT_TABLE_SZ (NFS4_MAX_SLOT_TABLE/(8*sizeof(long)))
@@ -225,5 +221,5 @@ struct nfs4_session {
225 struct nfs_client *clp; 221 struct nfs_client *clp;
226}; 222};
227 223
228#endif /* CONFIG_NFS_V4_1 */ 224#endif /* CONFIG_NFS_V4 */
229#endif 225#endif
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 092a04f874a8..a1147e5dd245 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -102,11 +102,8 @@
102 102
103extern long arch_ptrace(struct task_struct *child, long request, 103extern long arch_ptrace(struct task_struct *child, long request,
104 unsigned long addr, unsigned long data); 104 unsigned long addr, unsigned long data);
105extern int ptrace_traceme(void);
106extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len); 105extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
107extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len); 106extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
108extern int ptrace_attach(struct task_struct *tsk);
109extern int ptrace_detach(struct task_struct *, unsigned int);
110extern void ptrace_disable(struct task_struct *); 107extern void ptrace_disable(struct task_struct *);
111extern int ptrace_check_attach(struct task_struct *task, int kill); 108extern int ptrace_check_attach(struct task_struct *task, int kill);
112extern int ptrace_request(struct task_struct *child, long request, 109extern int ptrace_request(struct task_struct *child, long request,
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 88513fd8e208..d81db8012c63 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -212,6 +212,7 @@ struct rpc_task *rpc_run_task(const struct rpc_task_setup *);
212struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req, 212struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
213 const struct rpc_call_ops *ops); 213 const struct rpc_call_ops *ops);
214void rpc_put_task(struct rpc_task *); 214void rpc_put_task(struct rpc_task *);
215void rpc_put_task_async(struct rpc_task *);
215void rpc_exit_task(struct rpc_task *); 216void rpc_exit_task(struct rpc_task *);
216void rpc_exit(struct rpc_task *, int); 217void rpc_exit(struct rpc_task *, int);
217void rpc_release_calldata(const struct rpc_call_ops *, void *); 218void rpc_release_calldata(const struct rpc_call_ops *, void *);
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 7bb5cb64f3b8..11684d9e6bd2 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -930,6 +930,7 @@ enum
930 930
931#ifdef __KERNEL__ 931#ifdef __KERNEL__
932#include <linux/list.h> 932#include <linux/list.h>
933#include <linux/rcupdate.h>
933 934
934/* For the /proc/sys support */ 935/* For the /proc/sys support */
935struct ctl_table; 936struct ctl_table;
@@ -1037,10 +1038,15 @@ struct ctl_table_root {
1037 struct ctl_table trees. */ 1038 struct ctl_table trees. */
1038struct ctl_table_header 1039struct ctl_table_header
1039{ 1040{
1040 struct ctl_table *ctl_table; 1041 union {
1041 struct list_head ctl_entry; 1042 struct {
1042 int used; 1043 struct ctl_table *ctl_table;
1043 int count; 1044 struct list_head ctl_entry;
1045 int used;
1046 int count;
1047 };
1048 struct rcu_head rcu;
1049 };
1044 struct completion *unregistering; 1050 struct completion *unregistering;
1045 struct ctl_table *ctl_table_arg; 1051 struct ctl_table *ctl_table_arg;
1046 struct ctl_table_root *root; 1052 struct ctl_table_root *root;
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
index 246940511579..2e8ec51f0615 100644
--- a/include/target/target_core_transport.h
+++ b/include/target/target_core_transport.h
@@ -135,6 +135,8 @@ extern void transport_complete_task(struct se_task *, int);
135extern void transport_add_task_to_execute_queue(struct se_task *, 135extern void transport_add_task_to_execute_queue(struct se_task *,
136 struct se_task *, 136 struct se_task *,
137 struct se_device *); 137 struct se_device *);
138extern void transport_remove_task_from_execute_queue(struct se_task *,
139 struct se_device *);
138unsigned char *transport_dump_cmd_direction(struct se_cmd *); 140unsigned char *transport_dump_cmd_direction(struct se_cmd *);
139extern void transport_dump_dev_state(struct se_device *, char *, int *); 141extern void transport_dump_dev_state(struct se_device *, char *, int *);
140extern void transport_dump_dev_info(struct se_device *, struct se_lun *, 142extern void transport_dump_dev_info(struct se_device *, struct se_lun *,
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index aba421d68f6f..78f18adb49c8 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -31,7 +31,7 @@ DECLARE_EVENT_CLASS(block_rq_with_error,
31 0 : blk_rq_sectors(rq); 31 0 : blk_rq_sectors(rq);
32 __entry->errors = rq->errors; 32 __entry->errors = rq->errors;
33 33
34 blk_fill_rwbs_rq(__entry->rwbs, rq); 34 blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
35 blk_dump_cmd(__get_str(cmd), rq); 35 blk_dump_cmd(__get_str(cmd), rq);
36 ), 36 ),
37 37
@@ -118,7 +118,7 @@ DECLARE_EVENT_CLASS(block_rq,
118 __entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 118 __entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
119 blk_rq_bytes(rq) : 0; 119 blk_rq_bytes(rq) : 0;
120 120
121 blk_fill_rwbs_rq(__entry->rwbs, rq); 121 blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
122 blk_dump_cmd(__get_str(cmd), rq); 122 blk_dump_cmd(__get_str(cmd), rq);
123 memcpy(__entry->comm, current->comm, TASK_COMM_LEN); 123 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
124 ), 124 ),
@@ -563,7 +563,7 @@ TRACE_EVENT(block_rq_remap,
563 __entry->nr_sector = blk_rq_sectors(rq); 563 __entry->nr_sector = blk_rq_sectors(rq);
564 __entry->old_dev = dev; 564 __entry->old_dev = dev;
565 __entry->old_sector = from; 565 __entry->old_sector = from;
566 blk_fill_rwbs_rq(__entry->rwbs, rq); 566 blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
567 ), 567 ),
568 568
569 TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu", 569 TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 4349935c2ad8..e92e98189032 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1575,8 +1575,10 @@ static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
1575 return -ENODEV; 1575 return -ENODEV;
1576 1576
1577 trialcs = alloc_trial_cpuset(cs); 1577 trialcs = alloc_trial_cpuset(cs);
1578 if (!trialcs) 1578 if (!trialcs) {
1579 return -ENOMEM; 1579 retval = -ENOMEM;
1580 goto out;
1581 }
1580 1582
1581 switch (cft->private) { 1583 switch (cft->private) {
1582 case FILE_CPULIST: 1584 case FILE_CPULIST:
@@ -1591,6 +1593,7 @@ static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
1591 } 1593 }
1592 1594
1593 free_trial_cpuset(trialcs); 1595 free_trial_cpuset(trialcs);
1596out:
1594 cgroup_unlock(); 1597 cgroup_unlock();
1595 return retval; 1598 return retval;
1596} 1599}
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 1708b1e2972d..e2302e40b360 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -163,7 +163,7 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
163 return !err; 163 return !err;
164} 164}
165 165
166int ptrace_attach(struct task_struct *task) 166static int ptrace_attach(struct task_struct *task)
167{ 167{
168 int retval; 168 int retval;
169 169
@@ -219,7 +219,7 @@ out:
219 * Performs checks and sets PT_PTRACED. 219 * Performs checks and sets PT_PTRACED.
220 * Should be used by all ptrace implementations for PTRACE_TRACEME. 220 * Should be used by all ptrace implementations for PTRACE_TRACEME.
221 */ 221 */
222int ptrace_traceme(void) 222static int ptrace_traceme(void)
223{ 223{
224 int ret = -EPERM; 224 int ret = -EPERM;
225 225
@@ -293,7 +293,7 @@ static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
293 return false; 293 return false;
294} 294}
295 295
296int ptrace_detach(struct task_struct *child, unsigned int data) 296static int ptrace_detach(struct task_struct *child, unsigned int data)
297{ 297{
298 bool dead = false; 298 bool dead = false;
299 299
diff --git a/kernel/sched.c b/kernel/sched.c
index 18d38e4ec7ba..42eab5a8437d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4213,6 +4213,7 @@ void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
4213{ 4213{
4214 __wake_up_common(q, mode, 1, 0, key); 4214 __wake_up_common(q, mode, 1, 0, key);
4215} 4215}
4216EXPORT_SYMBOL_GPL(__wake_up_locked_key);
4216 4217
4217/** 4218/**
4218 * __wake_up_sync_key - wake up threads blocked on a waitqueue. 4219 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index ad6267714c84..01f75a5f17af 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -210,11 +210,12 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
210 210
211static void sched_rt_rq_enqueue(struct rt_rq *rt_rq) 211static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
212{ 212{
213 int this_cpu = smp_processor_id();
214 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr; 213 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
215 struct sched_rt_entity *rt_se; 214 struct sched_rt_entity *rt_se;
216 215
217 rt_se = rt_rq->tg->rt_se[this_cpu]; 216 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
217
218 rt_se = rt_rq->tg->rt_se[cpu];
218 219
219 if (rt_rq->rt_nr_running) { 220 if (rt_rq->rt_nr_running) {
220 if (rt_se && !on_rt_rq(rt_se)) 221 if (rt_se && !on_rt_rq(rt_se))
@@ -226,10 +227,10 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
226 227
227static void sched_rt_rq_dequeue(struct rt_rq *rt_rq) 228static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
228{ 229{
229 int this_cpu = smp_processor_id();
230 struct sched_rt_entity *rt_se; 230 struct sched_rt_entity *rt_se;
231 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
231 232
232 rt_se = rt_rq->tg->rt_se[this_cpu]; 233 rt_se = rt_rq->tg->rt_se[cpu];
233 234
234 if (rt_se && on_rt_rq(rt_se)) 235 if (rt_se && on_rt_rq(rt_se))
235 dequeue_rt_entity(rt_se); 236 dequeue_rt_entity(rt_se);
@@ -565,8 +566,11 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
565 if (rt_rq->rt_time || rt_rq->rt_nr_running) 566 if (rt_rq->rt_time || rt_rq->rt_nr_running)
566 idle = 0; 567 idle = 0;
567 raw_spin_unlock(&rt_rq->rt_runtime_lock); 568 raw_spin_unlock(&rt_rq->rt_runtime_lock);
568 } else if (rt_rq->rt_nr_running) 569 } else if (rt_rq->rt_nr_running) {
569 idle = 0; 570 idle = 0;
571 if (!rt_rq_throttled(rt_rq))
572 enqueue = 1;
573 }
570 574
571 if (enqueue) 575 if (enqueue)
572 sched_rt_rq_enqueue(rt_rq); 576 sched_rt_rq_enqueue(rt_rq);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 0f1bd83db985..4eed0af5d144 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -194,9 +194,9 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
194static struct ctl_table root_table[]; 194static struct ctl_table root_table[];
195static struct ctl_table_root sysctl_table_root; 195static struct ctl_table_root sysctl_table_root;
196static struct ctl_table_header root_table_header = { 196static struct ctl_table_header root_table_header = {
197 .count = 1, 197 {{.count = 1,
198 .ctl_table = root_table, 198 .ctl_table = root_table,
199 .ctl_entry = LIST_HEAD_INIT(sysctl_table_root.default_set.list), 199 .ctl_entry = LIST_HEAD_INIT(sysctl_table_root.default_set.list),}},
200 .root = &sysctl_table_root, 200 .root = &sysctl_table_root,
201 .set = &sysctl_table_root.default_set, 201 .set = &sysctl_table_root.default_set,
202}; 202};
@@ -1567,11 +1567,16 @@ void sysctl_head_get(struct ctl_table_header *head)
1567 spin_unlock(&sysctl_lock); 1567 spin_unlock(&sysctl_lock);
1568} 1568}
1569 1569
1570static void free_head(struct rcu_head *rcu)
1571{
1572 kfree(container_of(rcu, struct ctl_table_header, rcu));
1573}
1574
1570void sysctl_head_put(struct ctl_table_header *head) 1575void sysctl_head_put(struct ctl_table_header *head)
1571{ 1576{
1572 spin_lock(&sysctl_lock); 1577 spin_lock(&sysctl_lock);
1573 if (!--head->count) 1578 if (!--head->count)
1574 kfree(head); 1579 call_rcu(&head->rcu, free_head);
1575 spin_unlock(&sysctl_lock); 1580 spin_unlock(&sysctl_lock);
1576} 1581}
1577 1582
@@ -1948,10 +1953,10 @@ void unregister_sysctl_table(struct ctl_table_header * header)
1948 start_unregistering(header); 1953 start_unregistering(header);
1949 if (!--header->parent->count) { 1954 if (!--header->parent->count) {
1950 WARN_ON(1); 1955 WARN_ON(1);
1951 kfree(header->parent); 1956 call_rcu(&header->parent->rcu, free_head);
1952 } 1957 }
1953 if (!--header->count) 1958 if (!--header->count)
1954 kfree(header); 1959 call_rcu(&header->rcu, free_head);
1955 spin_unlock(&sysctl_lock); 1960 spin_unlock(&sysctl_lock);
1956} 1961}
1957 1962
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index d95721f33702..cbafed7d4f38 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -1827,21 +1827,5 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
1827 rwbs[i] = '\0'; 1827 rwbs[i] = '\0';
1828} 1828}
1829 1829
1830void blk_fill_rwbs_rq(char *rwbs, struct request *rq)
1831{
1832 int rw = rq->cmd_flags & 0x03;
1833 int bytes;
1834
1835 if (rq->cmd_flags & REQ_DISCARD)
1836 rw |= REQ_DISCARD;
1837
1838 if (rq->cmd_flags & REQ_SECURE)
1839 rw |= REQ_SECURE;
1840
1841 bytes = blk_rq_bytes(rq);
1842
1843 blk_fill_rwbs(rwbs, rw, bytes);
1844}
1845
1846#endif /* CONFIG_EVENT_TRACING */ 1830#endif /* CONFIG_EVENT_TRACING */
1847 1831
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 5021cbc34411..ac09f2226dc7 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -148,7 +148,7 @@ nla_policy_len(const struct nla_policy *p, int n)
148{ 148{
149 int i, len = 0; 149 int i, len = 0;
150 150
151 for (i = 0; i < n; i++) { 151 for (i = 0; i < n; i++, p++) {
152 if (p->len) 152 if (p->len)
153 len += nla_total_size(p->len); 153 len += nla_total_size(p->len);
154 else if (nla_attr_minlen[p->type]) 154 else if (nla_attr_minlen[p->type])
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 3e29781ee762..113e35c47502 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -650,10 +650,10 @@ static inline gfp_t alloc_hugepage_gfpmask(int defrag)
650 650
651static inline struct page *alloc_hugepage_vma(int defrag, 651static inline struct page *alloc_hugepage_vma(int defrag,
652 struct vm_area_struct *vma, 652 struct vm_area_struct *vma,
653 unsigned long haddr) 653 unsigned long haddr, int nd)
654{ 654{
655 return alloc_pages_vma(alloc_hugepage_gfpmask(defrag), 655 return alloc_pages_vma(alloc_hugepage_gfpmask(defrag),
656 HPAGE_PMD_ORDER, vma, haddr); 656 HPAGE_PMD_ORDER, vma, haddr, nd);
657} 657}
658 658
659#ifndef CONFIG_NUMA 659#ifndef CONFIG_NUMA
@@ -678,7 +678,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
678 if (unlikely(khugepaged_enter(vma))) 678 if (unlikely(khugepaged_enter(vma)))
679 return VM_FAULT_OOM; 679 return VM_FAULT_OOM;
680 page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), 680 page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
681 vma, haddr); 681 vma, haddr, numa_node_id());
682 if (unlikely(!page)) 682 if (unlikely(!page))
683 goto out; 683 goto out;
684 if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) { 684 if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
@@ -799,8 +799,8 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
799 } 799 }
800 800
801 for (i = 0; i < HPAGE_PMD_NR; i++) { 801 for (i = 0; i < HPAGE_PMD_NR; i++) {
802 pages[i] = alloc_page_vma(GFP_HIGHUSER_MOVABLE, 802 pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE,
803 vma, address); 803 vma, address, page_to_nid(page));
804 if (unlikely(!pages[i] || 804 if (unlikely(!pages[i] ||
805 mem_cgroup_newpage_charge(pages[i], mm, 805 mem_cgroup_newpage_charge(pages[i], mm,
806 GFP_KERNEL))) { 806 GFP_KERNEL))) {
@@ -902,7 +902,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
902 if (transparent_hugepage_enabled(vma) && 902 if (transparent_hugepage_enabled(vma) &&
903 !transparent_hugepage_debug_cow()) 903 !transparent_hugepage_debug_cow())
904 new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), 904 new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
905 vma, haddr); 905 vma, haddr, numa_node_id());
906 else 906 else
907 new_page = NULL; 907 new_page = NULL;
908 908
@@ -1745,7 +1745,8 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
1745static void collapse_huge_page(struct mm_struct *mm, 1745static void collapse_huge_page(struct mm_struct *mm,
1746 unsigned long address, 1746 unsigned long address,
1747 struct page **hpage, 1747 struct page **hpage,
1748 struct vm_area_struct *vma) 1748 struct vm_area_struct *vma,
1749 int node)
1749{ 1750{
1750 pgd_t *pgd; 1751 pgd_t *pgd;
1751 pud_t *pud; 1752 pud_t *pud;
@@ -1761,6 +1762,10 @@ static void collapse_huge_page(struct mm_struct *mm,
1761#ifndef CONFIG_NUMA 1762#ifndef CONFIG_NUMA
1762 VM_BUG_ON(!*hpage); 1763 VM_BUG_ON(!*hpage);
1763 new_page = *hpage; 1764 new_page = *hpage;
1765 if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
1766 up_read(&mm->mmap_sem);
1767 return;
1768 }
1764#else 1769#else
1765 VM_BUG_ON(*hpage); 1770 VM_BUG_ON(*hpage);
1766 /* 1771 /*
@@ -1773,18 +1778,19 @@ static void collapse_huge_page(struct mm_struct *mm,
1773 * mmap_sem in read mode is good idea also to allow greater 1778 * mmap_sem in read mode is good idea also to allow greater
1774 * scalability. 1779 * scalability.
1775 */ 1780 */
1776 new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address); 1781 new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
1782 node);
1777 if (unlikely(!new_page)) { 1783 if (unlikely(!new_page)) {
1778 up_read(&mm->mmap_sem); 1784 up_read(&mm->mmap_sem);
1779 *hpage = ERR_PTR(-ENOMEM); 1785 *hpage = ERR_PTR(-ENOMEM);
1780 return; 1786 return;
1781 } 1787 }
1782#endif
1783 if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { 1788 if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
1784 up_read(&mm->mmap_sem); 1789 up_read(&mm->mmap_sem);
1785 put_page(new_page); 1790 put_page(new_page);
1786 return; 1791 return;
1787 } 1792 }
1793#endif
1788 1794
1789 /* after allocating the hugepage upgrade to mmap_sem write mode */ 1795 /* after allocating the hugepage upgrade to mmap_sem write mode */
1790 up_read(&mm->mmap_sem); 1796 up_read(&mm->mmap_sem);
@@ -1919,6 +1925,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
1919 struct page *page; 1925 struct page *page;
1920 unsigned long _address; 1926 unsigned long _address;
1921 spinlock_t *ptl; 1927 spinlock_t *ptl;
1928 int node = -1;
1922 1929
1923 VM_BUG_ON(address & ~HPAGE_PMD_MASK); 1930 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1924 1931
@@ -1949,6 +1956,13 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
1949 page = vm_normal_page(vma, _address, pteval); 1956 page = vm_normal_page(vma, _address, pteval);
1950 if (unlikely(!page)) 1957 if (unlikely(!page))
1951 goto out_unmap; 1958 goto out_unmap;
1959 /*
1960 * Chose the node of the first page. This could
1961 * be more sophisticated and look at more pages,
1962 * but isn't for now.
1963 */
1964 if (node == -1)
1965 node = page_to_nid(page);
1952 VM_BUG_ON(PageCompound(page)); 1966 VM_BUG_ON(PageCompound(page));
1953 if (!PageLRU(page) || PageLocked(page) || !PageAnon(page)) 1967 if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
1954 goto out_unmap; 1968 goto out_unmap;
@@ -1965,7 +1979,7 @@ out_unmap:
1965 pte_unmap_unlock(pte, ptl); 1979 pte_unmap_unlock(pte, ptl);
1966 if (ret) 1980 if (ret)
1967 /* collapse_huge_page will return with the mmap_sem released */ 1981 /* collapse_huge_page will return with the mmap_sem released */
1968 collapse_huge_page(mm, address, hpage, vma); 1982 collapse_huge_page(mm, address, hpage, vma, node);
1969out: 1983out:
1970 return ret; 1984 return ret;
1971} 1985}
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 49355a970be2..b53ec99f1428 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1524,10 +1524,9 @@ static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1524} 1524}
1525 1525
1526/* Return a zonelist indicated by gfp for node representing a mempolicy */ 1526/* Return a zonelist indicated by gfp for node representing a mempolicy */
1527static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy) 1527static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
1528 int nd)
1528{ 1529{
1529 int nd = numa_node_id();
1530
1531 switch (policy->mode) { 1530 switch (policy->mode) {
1532 case MPOL_PREFERRED: 1531 case MPOL_PREFERRED:
1533 if (!(policy->flags & MPOL_F_LOCAL)) 1532 if (!(policy->flags & MPOL_F_LOCAL))
@@ -1679,7 +1678,7 @@ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
1679 zl = node_zonelist(interleave_nid(*mpol, vma, addr, 1678 zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1680 huge_page_shift(hstate_vma(vma))), gfp_flags); 1679 huge_page_shift(hstate_vma(vma))), gfp_flags);
1681 } else { 1680 } else {
1682 zl = policy_zonelist(gfp_flags, *mpol); 1681 zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
1683 if ((*mpol)->mode == MPOL_BIND) 1682 if ((*mpol)->mode == MPOL_BIND)
1684 *nodemask = &(*mpol)->v.nodes; 1683 *nodemask = &(*mpol)->v.nodes;
1685 } 1684 }
@@ -1820,7 +1819,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1820 */ 1819 */
1821struct page * 1820struct page *
1822alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, 1821alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
1823 unsigned long addr) 1822 unsigned long addr, int node)
1824{ 1823{
1825 struct mempolicy *pol = get_vma_policy(current, vma, addr); 1824 struct mempolicy *pol = get_vma_policy(current, vma, addr);
1826 struct zonelist *zl; 1825 struct zonelist *zl;
@@ -1836,7 +1835,7 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
1836 put_mems_allowed(); 1835 put_mems_allowed();
1837 return page; 1836 return page;
1838 } 1837 }
1839 zl = policy_zonelist(gfp, pol); 1838 zl = policy_zonelist(gfp, pol, node);
1840 if (unlikely(mpol_needs_cond_ref(pol))) { 1839 if (unlikely(mpol_needs_cond_ref(pol))) {
1841 /* 1840 /*
1842 * slow path: ref counted shared policy 1841 * slow path: ref counted shared policy
@@ -1892,7 +1891,8 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
1892 page = alloc_page_interleave(gfp, order, interleave_nodes(pol)); 1891 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
1893 else 1892 else
1894 page = __alloc_pages_nodemask(gfp, order, 1893 page = __alloc_pages_nodemask(gfp, order,
1895 policy_zonelist(gfp, pol), policy_nodemask(gfp, pol)); 1894 policy_zonelist(gfp, pol, numa_node_id()),
1895 policy_nodemask(gfp, pol));
1896 put_mems_allowed(); 1896 put_mems_allowed();
1897 return page; 1897 return page;
1898} 1898}
diff --git a/mm/rmap.c b/mm/rmap.c
index f21f4a1d6a1c..941bf82e8961 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -497,41 +497,51 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma,
497 struct mm_struct *mm = vma->vm_mm; 497 struct mm_struct *mm = vma->vm_mm;
498 int referenced = 0; 498 int referenced = 0;
499 499
500 /*
501 * Don't want to elevate referenced for mlocked page that gets this far,
502 * in order that it progresses to try_to_unmap and is moved to the
503 * unevictable list.
504 */
505 if (vma->vm_flags & VM_LOCKED) {
506 *mapcount = 0; /* break early from loop */
507 *vm_flags |= VM_LOCKED;
508 goto out;
509 }
510
511 /* Pretend the page is referenced if the task has the
512 swap token and is in the middle of a page fault. */
513 if (mm != current->mm && has_swap_token(mm) &&
514 rwsem_is_locked(&mm->mmap_sem))
515 referenced++;
516
517 if (unlikely(PageTransHuge(page))) { 500 if (unlikely(PageTransHuge(page))) {
518 pmd_t *pmd; 501 pmd_t *pmd;
519 502
520 spin_lock(&mm->page_table_lock); 503 spin_lock(&mm->page_table_lock);
504 /*
505 * rmap might return false positives; we must filter
506 * these out using page_check_address_pmd().
507 */
521 pmd = page_check_address_pmd(page, mm, address, 508 pmd = page_check_address_pmd(page, mm, address,
522 PAGE_CHECK_ADDRESS_PMD_FLAG); 509 PAGE_CHECK_ADDRESS_PMD_FLAG);
523 if (pmd && !pmd_trans_splitting(*pmd) && 510 if (!pmd) {
524 pmdp_clear_flush_young_notify(vma, address, pmd)) 511 spin_unlock(&mm->page_table_lock);
512 goto out;
513 }
514
515 if (vma->vm_flags & VM_LOCKED) {
516 spin_unlock(&mm->page_table_lock);
517 *mapcount = 0; /* break early from loop */
518 *vm_flags |= VM_LOCKED;
519 goto out;
520 }
521
522 /* go ahead even if the pmd is pmd_trans_splitting() */
523 if (pmdp_clear_flush_young_notify(vma, address, pmd))
525 referenced++; 524 referenced++;
526 spin_unlock(&mm->page_table_lock); 525 spin_unlock(&mm->page_table_lock);
527 } else { 526 } else {
528 pte_t *pte; 527 pte_t *pte;
529 spinlock_t *ptl; 528 spinlock_t *ptl;
530 529
530 /*
531 * rmap might return false positives; we must filter
532 * these out using page_check_address().
533 */
531 pte = page_check_address(page, mm, address, &ptl, 0); 534 pte = page_check_address(page, mm, address, &ptl, 0);
532 if (!pte) 535 if (!pte)
533 goto out; 536 goto out;
534 537
538 if (vma->vm_flags & VM_LOCKED) {
539 pte_unmap_unlock(pte, ptl);
540 *mapcount = 0; /* break early from loop */
541 *vm_flags |= VM_LOCKED;
542 goto out;
543 }
544
535 if (ptep_clear_flush_young_notify(vma, address, pte)) { 545 if (ptep_clear_flush_young_notify(vma, address, pte)) {
536 /* 546 /*
537 * Don't treat a reference through a sequentially read 547 * Don't treat a reference through a sequentially read
@@ -546,6 +556,12 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma,
546 pte_unmap_unlock(pte, ptl); 556 pte_unmap_unlock(pte, ptl);
547 } 557 }
548 558
559 /* Pretend the page is referenced if the task has the
560 swap token and is in the middle of a page fault. */
561 if (mm != current->mm && has_swap_token(mm) &&
562 rwsem_is_locked(&mm->mmap_sem))
563 referenced++;
564
549 (*mapcount)--; 565 (*mapcount)--;
550 566
551 if (referenced) 567 if (referenced)
diff --git a/net/Makefile b/net/Makefile
index a3330ebe2c53..a51d9465e628 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -19,9 +19,7 @@ obj-$(CONFIG_NETFILTER) += netfilter/
19obj-$(CONFIG_INET) += ipv4/ 19obj-$(CONFIG_INET) += ipv4/
20obj-$(CONFIG_XFRM) += xfrm/ 20obj-$(CONFIG_XFRM) += xfrm/
21obj-$(CONFIG_UNIX) += unix/ 21obj-$(CONFIG_UNIX) += unix/
22ifneq ($(CONFIG_IPV6),) 22obj-$(CONFIG_NET) += ipv6/
23obj-y += ipv6/
24endif
25obj-$(CONFIG_PACKET) += packet/ 23obj-$(CONFIG_PACKET) += packet/
26obj-$(CONFIG_NET_KEY) += key/ 24obj-$(CONFIG_NET_KEY) += key/
27obj-$(CONFIG_BRIDGE) += bridge/ 25obj-$(CONFIG_BRIDGE) += bridge/
diff --git a/net/bridge/Kconfig b/net/bridge/Kconfig
index 9190ae462cb4..6dee7bf648a9 100644
--- a/net/bridge/Kconfig
+++ b/net/bridge/Kconfig
@@ -6,6 +6,7 @@ config BRIDGE
6 tristate "802.1d Ethernet Bridging" 6 tristate "802.1d Ethernet Bridging"
7 select LLC 7 select LLC
8 select STP 8 select STP
9 depends on IPV6 || IPV6=n
9 ---help--- 10 ---help---
10 If you say Y here, then your Linux box will be able to act as an 11 If you say Y here, then your Linux box will be able to act as an
11 Ethernet bridge, which means that the different Ethernet segments it 12 Ethernet bridge, which means that the different Ethernet segments it
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 35b36b86d762..05f357828a2f 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -336,7 +336,6 @@ static void reset_connection(struct ceph_connection *con)
336 ceph_msg_put(con->out_msg); 336 ceph_msg_put(con->out_msg);
337 con->out_msg = NULL; 337 con->out_msg = NULL;
338 } 338 }
339 con->out_keepalive_pending = false;
340 con->in_seq = 0; 339 con->in_seq = 0;
341 con->in_seq_acked = 0; 340 con->in_seq_acked = 0;
342} 341}
@@ -1248,8 +1247,6 @@ static int process_connect(struct ceph_connection *con)
1248 con->auth_retry); 1247 con->auth_retry);
1249 if (con->auth_retry == 2) { 1248 if (con->auth_retry == 2) {
1250 con->error_msg = "connect authorization failure"; 1249 con->error_msg = "connect authorization failure";
1251 reset_connection(con);
1252 set_bit(CLOSED, &con->state);
1253 return -1; 1250 return -1;
1254 } 1251 }
1255 con->auth_retry = 1; 1252 con->auth_retry = 1;
@@ -1715,14 +1712,6 @@ more:
1715 1712
1716 /* open the socket first? */ 1713 /* open the socket first? */
1717 if (con->sock == NULL) { 1714 if (con->sock == NULL) {
1718 /*
1719 * if we were STANDBY and are reconnecting _this_
1720 * connection, bump connect_seq now. Always bump
1721 * global_seq.
1722 */
1723 if (test_and_clear_bit(STANDBY, &con->state))
1724 con->connect_seq++;
1725
1726 prepare_write_banner(msgr, con); 1715 prepare_write_banner(msgr, con);
1727 prepare_write_connect(msgr, con, 1); 1716 prepare_write_connect(msgr, con, 1);
1728 prepare_read_banner(con); 1717 prepare_read_banner(con);
@@ -1951,7 +1940,24 @@ static void con_work(struct work_struct *work)
1951 work.work); 1940 work.work);
1952 1941
1953 mutex_lock(&con->mutex); 1942 mutex_lock(&con->mutex);
1943 if (test_and_clear_bit(BACKOFF, &con->state)) {
1944 dout("con_work %p backing off\n", con);
1945 if (queue_delayed_work(ceph_msgr_wq, &con->work,
1946 round_jiffies_relative(con->delay))) {
1947 dout("con_work %p backoff %lu\n", con, con->delay);
1948 mutex_unlock(&con->mutex);
1949 return;
1950 } else {
1951 con->ops->put(con);
1952 dout("con_work %p FAILED to back off %lu\n", con,
1953 con->delay);
1954 }
1955 }
1954 1956
1957 if (test_bit(STANDBY, &con->state)) {
1958 dout("con_work %p STANDBY\n", con);
1959 goto done;
1960 }
1955 if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */ 1961 if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */
1956 dout("con_work CLOSED\n"); 1962 dout("con_work CLOSED\n");
1957 con_close_socket(con); 1963 con_close_socket(con);
@@ -2008,10 +2014,12 @@ static void ceph_fault(struct ceph_connection *con)
2008 /* Requeue anything that hasn't been acked */ 2014 /* Requeue anything that hasn't been acked */
2009 list_splice_init(&con->out_sent, &con->out_queue); 2015 list_splice_init(&con->out_sent, &con->out_queue);
2010 2016
2011 /* If there are no messages in the queue, place the connection 2017 /* If there are no messages queued or keepalive pending, place
2012 * in a STANDBY state (i.e., don't try to reconnect just yet). */ 2018 * the connection in a STANDBY state */
2013 if (list_empty(&con->out_queue) && !con->out_keepalive_pending) { 2019 if (list_empty(&con->out_queue) &&
2014 dout("fault setting STANDBY\n"); 2020 !test_bit(KEEPALIVE_PENDING, &con->state)) {
2021 dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
2022 clear_bit(WRITE_PENDING, &con->state);
2015 set_bit(STANDBY, &con->state); 2023 set_bit(STANDBY, &con->state);
2016 } else { 2024 } else {
2017 /* retry after a delay. */ 2025 /* retry after a delay. */
@@ -2019,11 +2027,24 @@ static void ceph_fault(struct ceph_connection *con)
2019 con->delay = BASE_DELAY_INTERVAL; 2027 con->delay = BASE_DELAY_INTERVAL;
2020 else if (con->delay < MAX_DELAY_INTERVAL) 2028 else if (con->delay < MAX_DELAY_INTERVAL)
2021 con->delay *= 2; 2029 con->delay *= 2;
2022 dout("fault queueing %p delay %lu\n", con, con->delay);
2023 con->ops->get(con); 2030 con->ops->get(con);
2024 if (queue_delayed_work(ceph_msgr_wq, &con->work, 2031 if (queue_delayed_work(ceph_msgr_wq, &con->work,
2025 round_jiffies_relative(con->delay)) == 0) 2032 round_jiffies_relative(con->delay))) {
2033 dout("fault queued %p delay %lu\n", con, con->delay);
2034 } else {
2026 con->ops->put(con); 2035 con->ops->put(con);
2036 dout("fault failed to queue %p delay %lu, backoff\n",
2037 con, con->delay);
2038 /*
2039 * In many cases we see a socket state change
2040 * while con_work is running and end up
2041 * queuing (non-delayed) work, such that we
2042 * can't backoff with a delay. Set a flag so
2043 * that when con_work restarts we schedule the
2044 * delay then.
2045 */
2046 set_bit(BACKOFF, &con->state);
2047 }
2027 } 2048 }
2028 2049
2029out_unlock: 2050out_unlock:
@@ -2094,6 +2115,19 @@ void ceph_messenger_destroy(struct ceph_messenger *msgr)
2094} 2115}
2095EXPORT_SYMBOL(ceph_messenger_destroy); 2116EXPORT_SYMBOL(ceph_messenger_destroy);
2096 2117
2118static void clear_standby(struct ceph_connection *con)
2119{
2120 /* come back from STANDBY? */
2121 if (test_and_clear_bit(STANDBY, &con->state)) {
2122 mutex_lock(&con->mutex);
2123 dout("clear_standby %p and ++connect_seq\n", con);
2124 con->connect_seq++;
2125 WARN_ON(test_bit(WRITE_PENDING, &con->state));
2126 WARN_ON(test_bit(KEEPALIVE_PENDING, &con->state));
2127 mutex_unlock(&con->mutex);
2128 }
2129}
2130
2097/* 2131/*
2098 * Queue up an outgoing message on the given connection. 2132 * Queue up an outgoing message on the given connection.
2099 */ 2133 */
@@ -2126,6 +2160,7 @@ void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
2126 2160
2127 /* if there wasn't anything waiting to send before, queue 2161 /* if there wasn't anything waiting to send before, queue
2128 * new work */ 2162 * new work */
2163 clear_standby(con);
2129 if (test_and_set_bit(WRITE_PENDING, &con->state) == 0) 2164 if (test_and_set_bit(WRITE_PENDING, &con->state) == 0)
2130 queue_con(con); 2165 queue_con(con);
2131} 2166}
@@ -2191,6 +2226,8 @@ void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg)
2191 */ 2226 */
2192void ceph_con_keepalive(struct ceph_connection *con) 2227void ceph_con_keepalive(struct ceph_connection *con)
2193{ 2228{
2229 dout("con_keepalive %p\n", con);
2230 clear_standby(con);
2194 if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 && 2231 if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 &&
2195 test_and_set_bit(WRITE_PENDING, &con->state) == 0) 2232 test_and_set_bit(WRITE_PENDING, &con->state) == 0)
2196 queue_con(con); 2233 queue_con(con);
diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c
index 1a040e64c69f..cd9c21df87d1 100644
--- a/net/ceph/pagevec.c
+++ b/net/ceph/pagevec.c
@@ -16,22 +16,30 @@ struct page **ceph_get_direct_page_vector(const char __user *data,
16 int num_pages, bool write_page) 16 int num_pages, bool write_page)
17{ 17{
18 struct page **pages; 18 struct page **pages;
19 int rc; 19 int got = 0;
20 int rc = 0;
20 21
21 pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS); 22 pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
22 if (!pages) 23 if (!pages)
23 return ERR_PTR(-ENOMEM); 24 return ERR_PTR(-ENOMEM);
24 25
25 down_read(&current->mm->mmap_sem); 26 down_read(&current->mm->mmap_sem);
26 rc = get_user_pages(current, current->mm, (unsigned long)data, 27 while (got < num_pages) {
27 num_pages, write_page, 0, pages, NULL); 28 rc = get_user_pages(current, current->mm,
29 (unsigned long)data + ((unsigned long)got * PAGE_SIZE),
30 num_pages - got, write_page, 0, pages + got, NULL);
31 if (rc < 0)
32 break;
33 BUG_ON(rc == 0);
34 got += rc;
35 }
28 up_read(&current->mm->mmap_sem); 36 up_read(&current->mm->mmap_sem);
29 if (rc < num_pages) 37 if (rc < 0)
30 goto fail; 38 goto fail;
31 return pages; 39 return pages;
32 40
33fail: 41fail:
34 ceph_put_page_vector(pages, rc > 0 ? rc : 0, false); 42 ceph_put_page_vector(pages, got, false);
35 return ERR_PTR(rc); 43 return ERR_PTR(rc);
36} 44}
37EXPORT_SYMBOL(ceph_get_direct_page_vector); 45EXPORT_SYMBOL(ceph_get_direct_page_vector);
diff --git a/net/core/dev.c b/net/core/dev.c
index 8ae6631abcc2..6561021d22d1 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1114,13 +1114,21 @@ EXPORT_SYMBOL(netdev_bonding_change);
1114void dev_load(struct net *net, const char *name) 1114void dev_load(struct net *net, const char *name)
1115{ 1115{
1116 struct net_device *dev; 1116 struct net_device *dev;
1117 int no_module;
1117 1118
1118 rcu_read_lock(); 1119 rcu_read_lock();
1119 dev = dev_get_by_name_rcu(net, name); 1120 dev = dev_get_by_name_rcu(net, name);
1120 rcu_read_unlock(); 1121 rcu_read_unlock();
1121 1122
1122 if (!dev && capable(CAP_NET_ADMIN)) 1123 no_module = !dev;
1123 request_module("%s", name); 1124 if (no_module && capable(CAP_NET_ADMIN))
1125 no_module = request_module("netdev-%s", name);
1126 if (no_module && capable(CAP_SYS_MODULE)) {
1127 if (!request_module("%s", name))
1128 pr_err("Loading kernel module for a network device "
1129"with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
1130"instead\n", name);
1131 }
1124} 1132}
1125EXPORT_SYMBOL(dev_load); 1133EXPORT_SYMBOL(dev_load);
1126 1134
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 508f9c18992f..133fd22ea287 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -144,7 +144,7 @@ void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
144 144
145 list_for_each_entry(ha, &from_list->list, list) { 145 list_for_each_entry(ha, &from_list->list, list) {
146 type = addr_type ? addr_type : ha->type; 146 type = addr_type ? addr_type : ha->type;
147 __hw_addr_del(to_list, ha->addr, addr_len, addr_type); 147 __hw_addr_del(to_list, ha->addr, addr_len, type);
148 } 148 }
149} 149}
150EXPORT_SYMBOL(__hw_addr_del_multiple); 150EXPORT_SYMBOL(__hw_addr_del_multiple);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index a9e7fc4c461f..b5bada92f637 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3321,7 +3321,7 @@ static void show_results(struct pktgen_dev *pkt_dev, int nr_frags)
3321 pkt_dev->started_at); 3321 pkt_dev->started_at);
3322 ktime_t idle = ns_to_ktime(pkt_dev->idle_acc); 3322 ktime_t idle = ns_to_ktime(pkt_dev->idle_acc);
3323 3323
3324 p += sprintf(p, "OK: %llu(c%llu+d%llu) nsec, %llu (%dbyte,%dfrags)\n", 3324 p += sprintf(p, "OK: %llu(c%llu+d%llu) usec, %llu (%dbyte,%dfrags)\n",
3325 (unsigned long long)ktime_to_us(elapsed), 3325 (unsigned long long)ktime_to_us(elapsed),
3326 (unsigned long long)ktime_to_us(ktime_sub(elapsed, idle)), 3326 (unsigned long long)ktime_to_us(ktime_sub(elapsed, idle)),
3327 (unsigned long long)ktime_to_us(idle), 3327 (unsigned long long)ktime_to_us(idle),
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index d5074a567289..c44348adba3b 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -1193,7 +1193,7 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
1193 goto err; 1193 goto err;
1194 } 1194 }
1195 1195
1196 if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setets) { 1196 if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
1197 struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]); 1197 struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
1198 err = ops->ieee_setpfc(netdev, pfc); 1198 err = ops->ieee_setpfc(netdev, pfc);
1199 if (err) 1199 if (err)
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 8cde009e8b85..4222e7a654b0 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -614,6 +614,9 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
614 /* Caller (dccp_v4_do_rcv) will send Reset */ 614 /* Caller (dccp_v4_do_rcv) will send Reset */
615 dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION; 615 dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
616 return 1; 616 return 1;
617 } else if (sk->sk_state == DCCP_CLOSED) {
618 dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
619 return 1;
617 } 620 }
618 621
619 if (sk->sk_state != DCCP_REQUESTING && sk->sk_state != DCCP_RESPOND) { 622 if (sk->sk_state != DCCP_REQUESTING && sk->sk_state != DCCP_RESPOND) {
@@ -668,10 +671,6 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
668 } 671 }
669 672
670 switch (sk->sk_state) { 673 switch (sk->sk_state) {
671 case DCCP_CLOSED:
672 dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
673 return 1;
674
675 case DCCP_REQUESTING: 674 case DCCP_REQUESTING:
676 queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len); 675 queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len);
677 if (queued >= 0) 676 if (queued >= 0)
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index 739435a6af39..cfa7a5e1c5c9 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -67,8 +67,9 @@ dns_resolver_instantiate(struct key *key, const void *_data, size_t datalen)
67 size_t result_len = 0; 67 size_t result_len = 0;
68 const char *data = _data, *end, *opt; 68 const char *data = _data, *end, *opt;
69 69
70 kenter("%%%d,%s,'%s',%zu", 70 kenter("%%%d,%s,'%*.*s',%zu",
71 key->serial, key->description, data, datalen); 71 key->serial, key->description,
72 (int)datalen, (int)datalen, data, datalen);
72 73
73 if (datalen <= 1 || !data || data[datalen - 1] != '\0') 74 if (datalen <= 1 || !data || data[datalen - 1] != '\0')
74 return -EINVAL; 75 return -EINVAL;
@@ -217,6 +218,19 @@ static void dns_resolver_describe(const struct key *key, struct seq_file *m)
217 seq_printf(m, ": %u", key->datalen); 218 seq_printf(m, ": %u", key->datalen);
218} 219}
219 220
221/*
222 * read the DNS data
223 * - the key's semaphore is read-locked
224 */
225static long dns_resolver_read(const struct key *key,
226 char __user *buffer, size_t buflen)
227{
228 if (key->type_data.x[0])
229 return key->type_data.x[0];
230
231 return user_read(key, buffer, buflen);
232}
233
220struct key_type key_type_dns_resolver = { 234struct key_type key_type_dns_resolver = {
221 .name = "dns_resolver", 235 .name = "dns_resolver",
222 .instantiate = dns_resolver_instantiate, 236 .instantiate = dns_resolver_instantiate,
@@ -224,7 +238,7 @@ struct key_type key_type_dns_resolver = {
224 .revoke = user_revoke, 238 .revoke = user_revoke,
225 .destroy = user_destroy, 239 .destroy = user_destroy,
226 .describe = dns_resolver_describe, 240 .describe = dns_resolver_describe,
227 .read = user_read, 241 .read = dns_resolver_read,
228}; 242};
229 243
230static int __init init_dns_resolver(void) 244static int __init init_dns_resolver(void)
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index df4616fce929..036652c8166d 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -670,7 +670,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
670 ifap = &ifa->ifa_next) { 670 ifap = &ifa->ifa_next) {
671 if (!strcmp(ifr.ifr_name, ifa->ifa_label) && 671 if (!strcmp(ifr.ifr_name, ifa->ifa_label) &&
672 sin_orig.sin_addr.s_addr == 672 sin_orig.sin_addr.s_addr ==
673 ifa->ifa_address) { 673 ifa->ifa_local) {
674 break; /* found */ 674 break; /* found */
675 } 675 }
676 } 676 }
@@ -1040,8 +1040,8 @@ static void inetdev_send_gratuitous_arp(struct net_device *dev,
1040 return; 1040 return;
1041 1041
1042 arp_send(ARPOP_REQUEST, ETH_P_ARP, 1042 arp_send(ARPOP_REQUEST, ETH_P_ARP,
1043 ifa->ifa_address, dev, 1043 ifa->ifa_local, dev,
1044 ifa->ifa_address, NULL, 1044 ifa->ifa_local, NULL,
1045 dev->dev_addr, NULL); 1045 dev->dev_addr, NULL);
1046} 1046}
1047 1047
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 6613edfac28c..d1d0e2c256fc 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1765,4 +1765,4 @@ module_exit(ipgre_fini);
1765MODULE_LICENSE("GPL"); 1765MODULE_LICENSE("GPL");
1766MODULE_ALIAS_RTNL_LINK("gre"); 1766MODULE_ALIAS_RTNL_LINK("gre");
1767MODULE_ALIAS_RTNL_LINK("gretap"); 1767MODULE_ALIAS_RTNL_LINK("gretap");
1768MODULE_ALIAS("gre0"); 1768MODULE_ALIAS_NETDEV("gre0");
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 988f52fba54a..a5f58e7cbb26 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -913,4 +913,4 @@ static void __exit ipip_fini(void)
913module_init(ipip_init); 913module_init(ipip_init);
914module_exit(ipip_fini); 914module_exit(ipip_fini);
915MODULE_LICENSE("GPL"); 915MODULE_LICENSE("GPL");
916MODULE_ALIAS("tunl0"); 916MODULE_ALIAS_NETDEV("tunl0");
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 4f4483e697bd..e528a42a52be 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -57,6 +57,7 @@
57MODULE_AUTHOR("Ville Nuorvala"); 57MODULE_AUTHOR("Ville Nuorvala");
58MODULE_DESCRIPTION("IPv6 tunneling device"); 58MODULE_DESCRIPTION("IPv6 tunneling device");
59MODULE_LICENSE("GPL"); 59MODULE_LICENSE("GPL");
60MODULE_ALIAS_NETDEV("ip6tnl0");
60 61
61#ifdef IP6_TNL_DEBUG 62#ifdef IP6_TNL_DEBUG
62#define IP6_TNL_TRACE(x...) printk(KERN_DEBUG "%s:" x "\n", __func__) 63#define IP6_TNL_TRACE(x...) printk(KERN_DEBUG "%s:" x "\n", __func__)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index a998db6e7895..e7db7014e89f 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -739,8 +739,10 @@ restart:
739 739
740 if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP)) 740 if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
741 nrt = rt6_alloc_cow(rt, &fl->fl6_dst, &fl->fl6_src); 741 nrt = rt6_alloc_cow(rt, &fl->fl6_dst, &fl->fl6_src);
742 else 742 else if (!(rt->dst.flags & DST_HOST))
743 nrt = rt6_alloc_clone(rt, &fl->fl6_dst); 743 nrt = rt6_alloc_clone(rt, &fl->fl6_dst);
744 else
745 goto out2;
744 746
745 dst_release(&rt->dst); 747 dst_release(&rt->dst);
746 rt = nrt ? : net->ipv6.ip6_null_entry; 748 rt = nrt ? : net->ipv6.ip6_null_entry;
@@ -2557,14 +2559,16 @@ static
2557int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write, 2559int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write,
2558 void __user *buffer, size_t *lenp, loff_t *ppos) 2560 void __user *buffer, size_t *lenp, loff_t *ppos)
2559{ 2561{
2560 struct net *net = current->nsproxy->net_ns; 2562 struct net *net;
2561 int delay = net->ipv6.sysctl.flush_delay; 2563 int delay;
2562 if (write) { 2564 if (!write)
2563 proc_dointvec(ctl, write, buffer, lenp, ppos);
2564 fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
2565 return 0;
2566 } else
2567 return -EINVAL; 2565 return -EINVAL;
2566
2567 net = (struct net *)ctl->extra1;
2568 delay = net->ipv6.sysctl.flush_delay;
2569 proc_dointvec(ctl, write, buffer, lenp, ppos);
2570 fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
2571 return 0;
2568} 2572}
2569 2573
2570ctl_table ipv6_route_table_template[] = { 2574ctl_table ipv6_route_table_template[] = {
@@ -2651,6 +2655,7 @@ struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
2651 2655
2652 if (table) { 2656 if (table) {
2653 table[0].data = &net->ipv6.sysctl.flush_delay; 2657 table[0].data = &net->ipv6.sysctl.flush_delay;
2658 table[0].extra1 = net;
2654 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh; 2659 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
2655 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size; 2660 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
2656 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval; 2661 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 8ce38f10a547..d2c16e10f650 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1290,4 +1290,4 @@ static int __init sit_init(void)
1290module_init(sit_init); 1290module_init(sit_init);
1291module_exit(sit_cleanup); 1291module_exit(sit_cleanup);
1292MODULE_LICENSE("GPL"); 1292MODULE_LICENSE("GPL");
1293MODULE_ALIAS("sit0"); 1293MODULE_ALIAS_NETDEV("sit0");
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 22f7ad5101ab..ba98e1308f3c 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -808,9 +808,9 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
808 dest->u_threshold = udest->u_threshold; 808 dest->u_threshold = udest->u_threshold;
809 dest->l_threshold = udest->l_threshold; 809 dest->l_threshold = udest->l_threshold;
810 810
811 spin_lock(&dest->dst_lock); 811 spin_lock_bh(&dest->dst_lock);
812 ip_vs_dst_reset(dest); 812 ip_vs_dst_reset(dest);
813 spin_unlock(&dest->dst_lock); 813 spin_unlock_bh(&dest->dst_lock);
814 814
815 if (add) 815 if (add)
816 ip_vs_new_estimator(&dest->stats); 816 ip_vs_new_estimator(&dest->stats);
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index b07393eab88e..91816998ed86 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -85,6 +85,8 @@ EXPORT_SYMBOL(nf_log_unregister);
85 85
86int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger) 86int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger)
87{ 87{
88 if (pf >= ARRAY_SIZE(nf_loggers))
89 return -EINVAL;
88 mutex_lock(&nf_log_mutex); 90 mutex_lock(&nf_log_mutex);
89 if (__find_logger(pf, logger->name) == NULL) { 91 if (__find_logger(pf, logger->name) == NULL) {
90 mutex_unlock(&nf_log_mutex); 92 mutex_unlock(&nf_log_mutex);
@@ -98,6 +100,8 @@ EXPORT_SYMBOL(nf_log_bind_pf);
98 100
99void nf_log_unbind_pf(u_int8_t pf) 101void nf_log_unbind_pf(u_int8_t pf)
100{ 102{
103 if (pf >= ARRAY_SIZE(nf_loggers))
104 return;
101 mutex_lock(&nf_log_mutex); 105 mutex_lock(&nf_log_mutex);
102 rcu_assign_pointer(nf_loggers[pf], NULL); 106 rcu_assign_pointer(nf_loggers[pf], NULL);
103 mutex_unlock(&nf_log_mutex); 107 mutex_unlock(&nf_log_mutex);
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 478181d53c55..1f924595bdef 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1407,7 +1407,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
1407 int noblock = flags&MSG_DONTWAIT; 1407 int noblock = flags&MSG_DONTWAIT;
1408 size_t copied; 1408 size_t copied;
1409 struct sk_buff *skb, *data_skb; 1409 struct sk_buff *skb, *data_skb;
1410 int err; 1410 int err, ret;
1411 1411
1412 if (flags&MSG_OOB) 1412 if (flags&MSG_OOB)
1413 return -EOPNOTSUPP; 1413 return -EOPNOTSUPP;
@@ -1470,8 +1470,13 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
1470 1470
1471 skb_free_datagram(sk, skb); 1471 skb_free_datagram(sk, skb);
1472 1472
1473 if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) 1473 if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
1474 netlink_dump(sk); 1474 ret = netlink_dump(sk);
1475 if (ret) {
1476 sk->sk_err = ret;
1477 sk->sk_error_report(sk);
1478 }
1479 }
1475 1480
1476 scm_recv(sock, msg, siocb->scm, flags); 1481 scm_recv(sock, msg, siocb->scm, flags);
1477out: 1482out:
@@ -1736,6 +1741,7 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
1736 struct netlink_callback *cb; 1741 struct netlink_callback *cb;
1737 struct sock *sk; 1742 struct sock *sk;
1738 struct netlink_sock *nlk; 1743 struct netlink_sock *nlk;
1744 int ret;
1739 1745
1740 cb = kzalloc(sizeof(*cb), GFP_KERNEL); 1746 cb = kzalloc(sizeof(*cb), GFP_KERNEL);
1741 if (cb == NULL) 1747 if (cb == NULL)
@@ -1764,9 +1770,13 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
1764 nlk->cb = cb; 1770 nlk->cb = cb;
1765 mutex_unlock(nlk->cb_mutex); 1771 mutex_unlock(nlk->cb_mutex);
1766 1772
1767 netlink_dump(sk); 1773 ret = netlink_dump(sk);
1774
1768 sock_put(sk); 1775 sock_put(sk);
1769 1776
1777 if (ret)
1778 return ret;
1779
1770 /* We successfully started a dump, by returning -EINTR we 1780 /* We successfully started a dump, by returning -EINTR we
1771 * signal not to send ACK even if it was requested. 1781 * signal not to send ACK even if it was requested.
1772 */ 1782 */
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 71f373c421bc..c47a511f203d 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -551,7 +551,10 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
551 if (conn->c_loopback 551 if (conn->c_loopback
552 && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { 552 && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
553 rds_cong_map_updated(conn->c_fcong, ~(u64) 0); 553 rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
554 return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; 554 scat = &rm->data.op_sg[sg];
555 ret = sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
556 ret = min_t(int, ret, scat->length - conn->c_xmit_data_off);
557 return ret;
555 } 558 }
556 559
557 /* FIXME we may overallocate here */ 560 /* FIXME we may overallocate here */
diff --git a/net/rds/loop.c b/net/rds/loop.c
index aeec1d483b17..bca6761a3ca2 100644
--- a/net/rds/loop.c
+++ b/net/rds/loop.c
@@ -61,10 +61,15 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm,
61 unsigned int hdr_off, unsigned int sg, 61 unsigned int hdr_off, unsigned int sg,
62 unsigned int off) 62 unsigned int off)
63{ 63{
64 struct scatterlist *sgp = &rm->data.op_sg[sg];
65 int ret = sizeof(struct rds_header) +
66 be32_to_cpu(rm->m_inc.i_hdr.h_len);
67
64 /* Do not send cong updates to loopback */ 68 /* Do not send cong updates to loopback */
65 if (rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { 69 if (rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
66 rds_cong_map_updated(conn->c_fcong, ~(u64) 0); 70 rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
67 return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; 71 ret = min_t(int, ret, sgp->length - conn->c_xmit_data_off);
72 goto out;
68 } 73 }
69 74
70 BUG_ON(hdr_off || sg || off); 75 BUG_ON(hdr_off || sg || off);
@@ -80,8 +85,8 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm,
80 NULL); 85 NULL);
81 86
82 rds_inc_put(&rm->m_inc); 87 rds_inc_put(&rm->m_inc);
83 88out:
84 return sizeof(struct rds_header) + be32_to_cpu(rm->m_inc.i_hdr.h_len); 89 return ret;
85} 90}
86 91
87/* 92/*
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
index 89315009bab1..1a2b0633fece 100644
--- a/net/rxrpc/ar-input.c
+++ b/net/rxrpc/ar-input.c
@@ -423,6 +423,7 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
423 goto protocol_error; 423 goto protocol_error;
424 } 424 }
425 425
426 case RXRPC_PACKET_TYPE_ACKALL:
426 case RXRPC_PACKET_TYPE_ACK: 427 case RXRPC_PACKET_TYPE_ACK:
427 /* ACK processing is done in process context */ 428 /* ACK processing is done in process context */
428 read_lock_bh(&call->state_lock); 429 read_lock_bh(&call->state_lock);
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 243fc09b164e..59e599498e37 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -252,23 +252,37 @@ static void rpc_set_active(struct rpc_task *task)
252 252
253/* 253/*
254 * Mark an RPC call as having completed by clearing the 'active' bit 254 * Mark an RPC call as having completed by clearing the 'active' bit
255 * and then waking up all tasks that were sleeping.
255 */ 256 */
256static void rpc_mark_complete_task(struct rpc_task *task) 257static int rpc_complete_task(struct rpc_task *task)
257{ 258{
258 smp_mb__before_clear_bit(); 259 void *m = &task->tk_runstate;
260 wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
261 struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
262 unsigned long flags;
263 int ret;
264
265 spin_lock_irqsave(&wq->lock, flags);
259 clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate); 266 clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
260 smp_mb__after_clear_bit(); 267 ret = atomic_dec_and_test(&task->tk_count);
261 wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE); 268 if (waitqueue_active(wq))
269 __wake_up_locked_key(wq, TASK_NORMAL, &k);
270 spin_unlock_irqrestore(&wq->lock, flags);
271 return ret;
262} 272}
263 273
264/* 274/*
265 * Allow callers to wait for completion of an RPC call 275 * Allow callers to wait for completion of an RPC call
276 *
277 * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
278 * to enforce taking of the wq->lock and hence avoid races with
279 * rpc_complete_task().
266 */ 280 */
267int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *)) 281int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
268{ 282{
269 if (action == NULL) 283 if (action == NULL)
270 action = rpc_wait_bit_killable; 284 action = rpc_wait_bit_killable;
271 return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE, 285 return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
272 action, TASK_KILLABLE); 286 action, TASK_KILLABLE);
273} 287}
274EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task); 288EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
@@ -857,34 +871,67 @@ static void rpc_async_release(struct work_struct *work)
857 rpc_free_task(container_of(work, struct rpc_task, u.tk_work)); 871 rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
858} 872}
859 873
860void rpc_put_task(struct rpc_task *task) 874static void rpc_release_resources_task(struct rpc_task *task)
861{ 875{
862 if (!atomic_dec_and_test(&task->tk_count))
863 return;
864 /* Release resources */
865 if (task->tk_rqstp) 876 if (task->tk_rqstp)
866 xprt_release(task); 877 xprt_release(task);
867 if (task->tk_msg.rpc_cred) 878 if (task->tk_msg.rpc_cred)
868 put_rpccred(task->tk_msg.rpc_cred); 879 put_rpccred(task->tk_msg.rpc_cred);
869 rpc_task_release_client(task); 880 rpc_task_release_client(task);
870 if (task->tk_workqueue != NULL) { 881}
882
883static void rpc_final_put_task(struct rpc_task *task,
884 struct workqueue_struct *q)
885{
886 if (q != NULL) {
871 INIT_WORK(&task->u.tk_work, rpc_async_release); 887 INIT_WORK(&task->u.tk_work, rpc_async_release);
872 queue_work(task->tk_workqueue, &task->u.tk_work); 888 queue_work(q, &task->u.tk_work);
873 } else 889 } else
874 rpc_free_task(task); 890 rpc_free_task(task);
875} 891}
892
893static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
894{
895 if (atomic_dec_and_test(&task->tk_count)) {
896 rpc_release_resources_task(task);
897 rpc_final_put_task(task, q);
898 }
899}
900
901void rpc_put_task(struct rpc_task *task)
902{
903 rpc_do_put_task(task, NULL);
904}
876EXPORT_SYMBOL_GPL(rpc_put_task); 905EXPORT_SYMBOL_GPL(rpc_put_task);
877 906
907void rpc_put_task_async(struct rpc_task *task)
908{
909 rpc_do_put_task(task, task->tk_workqueue);
910}
911EXPORT_SYMBOL_GPL(rpc_put_task_async);
912
878static void rpc_release_task(struct rpc_task *task) 913static void rpc_release_task(struct rpc_task *task)
879{ 914{
880 dprintk("RPC: %5u release task\n", task->tk_pid); 915 dprintk("RPC: %5u release task\n", task->tk_pid);
881 916
882 BUG_ON (RPC_IS_QUEUED(task)); 917 BUG_ON (RPC_IS_QUEUED(task));
883 918
884 /* Wake up anyone who is waiting for task completion */ 919 rpc_release_resources_task(task);
885 rpc_mark_complete_task(task);
886 920
887 rpc_put_task(task); 921 /*
922 * Note: at this point we have been removed from rpc_clnt->cl_tasks,
923 * so it should be safe to use task->tk_count as a test for whether
924 * or not any other processes still hold references to our rpc_task.
925 */
926 if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
927 /* Wake up anyone who may be waiting for task completion */
928 if (!rpc_complete_task(task))
929 return;
930 } else {
931 if (!atomic_dec_and_test(&task->tk_count))
932 return;
933 }
934 rpc_final_put_task(task, task->tk_workqueue);
888} 935}
889 936
890int rpciod_up(void) 937int rpciod_up(void)
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 9df1eadc912a..1a10dcd999ea 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -1335,6 +1335,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
1335 p, 0, length, DMA_FROM_DEVICE); 1335 p, 0, length, DMA_FROM_DEVICE);
1336 if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) { 1336 if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) {
1337 put_page(p); 1337 put_page(p);
1338 svc_rdma_put_context(ctxt, 1);
1338 return; 1339 return;
1339 } 1340 }
1340 atomic_inc(&xprt->sc_dma_used); 1341 atomic_inc(&xprt->sc_dma_used);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index c431f5a57960..be96d429b475 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1631,7 +1631,8 @@ static struct socket *xs_create_sock(struct rpc_xprt *xprt,
1631 } 1631 }
1632 xs_reclassify_socket(family, sock); 1632 xs_reclassify_socket(family, sock);
1633 1633
1634 if (xs_bind(transport, sock)) { 1634 err = xs_bind(transport, sock);
1635 if (err) {
1635 sock_release(sock); 1636 sock_release(sock);
1636 goto out; 1637 goto out;
1637 } 1638 }
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index dd419d286204..437a99e560e1 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1724,7 +1724,11 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1724 1724
1725 msg->msg_namelen = 0; 1725 msg->msg_namelen = 0;
1726 1726
1727 mutex_lock(&u->readlock); 1727 err = mutex_lock_interruptible(&u->readlock);
1728 if (err) {
1729 err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
1730 goto out;
1731 }
1728 1732
1729 skb = skb_recv_datagram(sk, flags, noblock, &err); 1733 skb = skb_recv_datagram(sk, flags, noblock, &err);
1730 if (!skb) { 1734 if (!skb) {
@@ -1864,7 +1868,11 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1864 memset(&tmp_scm, 0, sizeof(tmp_scm)); 1868 memset(&tmp_scm, 0, sizeof(tmp_scm));
1865 } 1869 }
1866 1870
1867 mutex_lock(&u->readlock); 1871 err = mutex_lock_interruptible(&u->readlock);
1872 if (err) {
1873 err = sock_intr_errno(timeo);
1874 goto out;
1875 }
1868 1876
1869 do { 1877 do {
1870 int chunk; 1878 int chunk;
@@ -1895,11 +1903,12 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1895 1903
1896 timeo = unix_stream_data_wait(sk, timeo); 1904 timeo = unix_stream_data_wait(sk, timeo);
1897 1905
1898 if (signal_pending(current)) { 1906 if (signal_pending(current)
1907 || mutex_lock_interruptible(&u->readlock)) {
1899 err = sock_intr_errno(timeo); 1908 err = sock_intr_errno(timeo);
1900 goto out; 1909 goto out;
1901 } 1910 }
1902 mutex_lock(&u->readlock); 1911
1903 continue; 1912 continue;
1904 unlock: 1913 unlock:
1905 unix_state_unlock(sk); 1914 unix_state_unlock(sk);
diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
index 6c94c6ce2925..291228e25984 100644
--- a/scripts/basic/fixdep.c
+++ b/scripts/basic/fixdep.c
@@ -309,6 +309,11 @@ static void do_config_file(const char *filename)
309 close(fd); 309 close(fd);
310} 310}
311 311
312/*
313 * Important: The below generated source_foo.o and deps_foo.o variable
314 * assignments are parsed not only by make, but also by the rather simple
315 * parser in scripts/mod/sumversion.c.
316 */
312static void parse_dep_file(void *map, size_t len) 317static void parse_dep_file(void *map, size_t len)
313{ 318{
314 char *m = map; 319 char *m = map;
@@ -323,7 +328,6 @@ static void parse_dep_file(void *map, size_t len)
 		exit(1);
 	}
 	memcpy(s, m, p-m); s[p-m] = 0;
-	printf("deps_%s := \\\n", target);
 	m = p+1;
 
 	clear_config();
@@ -343,12 +347,15 @@ static void parse_dep_file(void *map, size_t len)
 			    strrcmp(s, "arch/um/include/uml-config.h") &&
 			    strrcmp(s, ".ver")) {
 				/*
-				 * Do not output the first dependency (the
-				 * source file), so that kbuild is not confused
-				 * if a .c file is rewritten into .S or vice
-				 * versa.
+				 * Do not list the source file as dependency, so that
+				 * kbuild is not confused if a .c file is rewritten
+				 * into .S or vice versa. Storing it in source_* is
+				 * needed for modpost to compute srcversions.
 				 */
-				if (!first)
+				if (first) {
+					printf("source_%s := %s\n\n", target, s);
+					printf("deps_%s := \\\n", target);
+				} else
 					printf(" %s \\\n", s);
 				do_config_file(s);
 			}
diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
index ecf9c7dc1825..9dfcd6d988da 100644
--- a/scripts/mod/sumversion.c
+++ b/scripts/mod/sumversion.c
@@ -300,8 +300,8 @@ static int is_static_library(const char *objfile)
 	return 0;
 }
 
-/* We have dir/file.o. Open dir/.file.o.cmd, look for deps_ line to
- * figure out source file. */
+/* We have dir/file.o. Open dir/.file.o.cmd, look for source_ and deps_ line
+ * to figure out source files. */
 static int parse_source_files(const char *objfile, struct md4_ctx *md)
 {
 	char *cmd, *file, *line, *dir;
@@ -340,6 +340,21 @@ static int parse_source_files(const char *objfile, struct md4_ctx *md)
 	 */
 	while ((line = get_next_line(&pos, file, flen)) != NULL) {
 		char* p = line;
+
+		if (strncmp(line, "source_", sizeof("source_")-1) == 0) {
+			p = strrchr(line, ' ');
+			if (!p) {
+				warn("malformed line: %s\n", line);
+				goto out_file;
+			}
+			p++;
+			if (!parse_file(p, md)) {
+				warn("could not open %s: %s\n",
+				     p, strerror(errno));
+				goto out_file;
+			}
+			continue;
+		}
 		if (strncmp(line, "deps_", sizeof("deps_")-1) == 0) {
 			check_files = 1;
 			continue;
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index a07b031090d8..067982f4f182 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -1039,9 +1039,11 @@ static struct hda_verb cs_errata_init_verbs[] = {
 	{0x11, AC_VERB_SET_PROC_COEF, 0x0008},
 	{0x11, AC_VERB_SET_PROC_STATE, 0x00},
 
+#if 0 /* Don't to set to D3 as we are in power-up sequence */
 	{0x07, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Rx: D3 */
 	{0x08, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Tx: D3 */
 	/*{0x01, AC_VERB_SET_POWER_STATE, 0x03},*/ /* AFG: D3 This is already handled */
+#endif
 
 	{} /* terminator */
 };
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index a58767736727..ec0fa2dd0a27 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1634,6 +1634,9 @@ static struct hda_codec_preset snd_hda_preset_hdmi[] = {
 { .id = 0x10de0012, .name = "GPU 12 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
 { .id = 0x10de0013, .name = "GPU 13 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
 { .id = 0x10de0014, .name = "GPU 14 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
+{ .id = 0x10de0015, .name = "GPU 15 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
+{ .id = 0x10de0016, .name = "GPU 16 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
+/* 17 is known to be absent */
 { .id = 0x10de0018, .name = "GPU 18 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
 { .id = 0x10de0019, .name = "GPU 19 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
 { .id = 0x10de001a, .name = "GPU 1a HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
@@ -1676,6 +1679,8 @@ MODULE_ALIAS("snd-hda-codec-id:10de0011");
 MODULE_ALIAS("snd-hda-codec-id:10de0012");
 MODULE_ALIAS("snd-hda-codec-id:10de0013");
 MODULE_ALIAS("snd-hda-codec-id:10de0014");
+MODULE_ALIAS("snd-hda-codec-id:10de0015");
+MODULE_ALIAS("snd-hda-codec-id:10de0016");
 MODULE_ALIAS("snd-hda-codec-id:10de0018");
 MODULE_ALIAS("snd-hda-codec-id:10de0019");
 MODULE_ALIAS("snd-hda-codec-id:10de001a");
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 3328a259a242..4261bb8eec1d 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1133,11 +1133,8 @@ static void alc_automute_speaker(struct hda_codec *codec, int pinctl)
 		nid = spec->autocfg.hp_pins[i];
 		if (!nid)
 			break;
-		if (snd_hda_jack_detect(codec, nid)) {
-			spec->jack_present = 1;
-			break;
-		}
-		alc_report_jack(codec, spec->autocfg.hp_pins[i]);
+		alc_report_jack(codec, nid);
+		spec->jack_present |= snd_hda_jack_detect(codec, nid);
 	}
 
 	mute = spec->jack_present ? HDA_AMP_MUTE : 0;
@@ -15015,7 +15012,7 @@ static struct snd_pci_quirk alc269_cfg_tbl[] = {
 	SND_PCI_QUIRK(0x1043, 0x11e3, "ASUS U33Jc", ALC269VB_AMIC),
 	SND_PCI_QUIRK(0x1043, 0x1273, "ASUS UL80Jt", ALC269VB_AMIC),
 	SND_PCI_QUIRK(0x1043, 0x1283, "ASUS U53Jc", ALC269_AMIC),
-	SND_PCI_QUIRK(0x1043, 0x12b3, "ASUS N82Jv", ALC269_AMIC),
+	SND_PCI_QUIRK(0x1043, 0x12b3, "ASUS N82JV", ALC269VB_AMIC),
 	SND_PCI_QUIRK(0x1043, 0x12d3, "ASUS N61Jv", ALC269_AMIC),
 	SND_PCI_QUIRK(0x1043, 0x13a3, "ASUS UL30Vt", ALC269_AMIC),
 	SND_PCI_QUIRK(0x1043, 0x1373, "ASUS G73JX", ALC269_AMIC),
diff --git a/sound/soc/codecs/wm8978.c b/sound/soc/codecs/wm8978.c
index 4bbc3442703f..8dfb0a0da673 100644
--- a/sound/soc/codecs/wm8978.c
+++ b/sound/soc/codecs/wm8978.c
@@ -145,18 +145,18 @@ static const struct snd_kcontrol_new wm8978_snd_controls[] = {
 	SOC_SINGLE("DAC Playback Limiter Threshold",
 		WM8978_DAC_LIMITER_2, 4, 7, 0),
 	SOC_SINGLE("DAC Playback Limiter Boost",
-		WM8978_DAC_LIMITER_2, 0, 15, 0),
+		WM8978_DAC_LIMITER_2, 0, 12, 0),
 
 	SOC_ENUM("ALC Enable Switch", alc1),
 	SOC_SINGLE("ALC Capture Min Gain", WM8978_ALC_CONTROL_1, 0, 7, 0),
 	SOC_SINGLE("ALC Capture Max Gain", WM8978_ALC_CONTROL_1, 3, 7, 0),
 
-	SOC_SINGLE("ALC Capture Hold", WM8978_ALC_CONTROL_2, 4, 7, 0),
+	SOC_SINGLE("ALC Capture Hold", WM8978_ALC_CONTROL_2, 4, 10, 0),
 	SOC_SINGLE("ALC Capture Target", WM8978_ALC_CONTROL_2, 0, 15, 0),
 
 	SOC_ENUM("ALC Capture Mode", alc3),
-	SOC_SINGLE("ALC Capture Decay", WM8978_ALC_CONTROL_3, 4, 15, 0),
-	SOC_SINGLE("ALC Capture Attack", WM8978_ALC_CONTROL_3, 0, 15, 0),
+	SOC_SINGLE("ALC Capture Decay", WM8978_ALC_CONTROL_3, 4, 10, 0),
+	SOC_SINGLE("ALC Capture Attack", WM8978_ALC_CONTROL_3, 0, 10, 0),
 
 	SOC_SINGLE("ALC Capture Noise Gate Switch", WM8978_NOISE_GATE, 3, 1, 0),
 	SOC_SINGLE("ALC Capture Noise Gate Threshold",
@@ -211,8 +211,10 @@ static const struct snd_kcontrol_new wm8978_snd_controls[] = {
 		WM8978_LOUT2_SPK_CONTROL, WM8978_ROUT2_SPK_CONTROL, 6, 1, 1),
 
 	/* DAC / ADC oversampling */
-	SOC_SINGLE("DAC 128x Oversampling Switch", WM8978_DAC_CONTROL, 8, 1, 0),
-	SOC_SINGLE("ADC 128x Oversampling Switch", WM8978_ADC_CONTROL, 8, 1, 0),
+	SOC_SINGLE("DAC 128x Oversampling Switch", WM8978_DAC_CONTROL,
+		   5, 1, 0),
+	SOC_SINGLE("ADC 128x Oversampling Switch", WM8978_ADC_CONTROL,
+		   5, 1, 0),
 };
 
 /* Mixer #1: Output (OUT1, OUT2) Mixer: mix AUX, Input mixer output and DAC */
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index ebaee5ca7434..c6c958ee5d59 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -110,6 +110,9 @@ struct wm8994_priv {
 
 	unsigned int aif1clk_enable:1;
 	unsigned int aif2clk_enable:1;
+
+	unsigned int aif1clk_disable:1;
+	unsigned int aif2clk_disable:1;
 };
 
 static int wm8994_readable(unsigned int reg)
@@ -1015,14 +1018,18 @@ static int late_enable_ev(struct snd_soc_dapm_widget *w,
 
 	switch (event) {
 	case SND_SOC_DAPM_PRE_PMU:
-		if (wm8994->aif1clk_enable)
+		if (wm8994->aif1clk_enable) {
 			snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
 					    WM8994_AIF1CLK_ENA_MASK,
 					    WM8994_AIF1CLK_ENA);
-		if (wm8994->aif2clk_enable)
+			wm8994->aif1clk_enable = 0;
+		}
+		if (wm8994->aif2clk_enable) {
 			snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
 					    WM8994_AIF2CLK_ENA_MASK,
 					    WM8994_AIF2CLK_ENA);
+			wm8994->aif2clk_enable = 0;
+		}
 		break;
 	}
 
@@ -1037,15 +1044,15 @@ static int late_disable_ev(struct snd_soc_dapm_widget *w,
 
 	switch (event) {
 	case SND_SOC_DAPM_POST_PMD:
-		if (wm8994->aif1clk_enable) {
+		if (wm8994->aif1clk_disable) {
 			snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
 					    WM8994_AIF1CLK_ENA_MASK, 0);
-			wm8994->aif1clk_enable = 0;
+			wm8994->aif1clk_disable = 0;
 		}
-		if (wm8994->aif2clk_enable) {
+		if (wm8994->aif2clk_disable) {
 			snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
 					    WM8994_AIF2CLK_ENA_MASK, 0);
-			wm8994->aif2clk_enable = 0;
+			wm8994->aif2clk_disable = 0;
 		}
 		break;
 	}
@@ -1063,6 +1070,9 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w,
 	case SND_SOC_DAPM_PRE_PMU:
 		wm8994->aif1clk_enable = 1;
 		break;
+	case SND_SOC_DAPM_POST_PMD:
+		wm8994->aif1clk_disable = 1;
+		break;
 	}
 
 	return 0;
@@ -1078,11 +1088,21 @@ static int aif2clk_ev(struct snd_soc_dapm_widget *w,
 	case SND_SOC_DAPM_PRE_PMU:
 		wm8994->aif2clk_enable = 1;
 		break;
+	case SND_SOC_DAPM_POST_PMD:
+		wm8994->aif2clk_disable = 1;
+		break;
 	}
 
 	return 0;
 }
 
+static int adc_mux_ev(struct snd_soc_dapm_widget *w,
+		      struct snd_kcontrol *kcontrol, int event)
+{
+	late_enable_ev(w, kcontrol, event);
+	return 0;
+}
+
 static int dac_ev(struct snd_soc_dapm_widget *w,
 		  struct snd_kcontrol *kcontrol, int event)
 {
@@ -1398,11 +1418,23 @@ SND_SOC_DAPM_DAC_E("DAC1R", NULL, SND_SOC_NOPM, 0, 0,
 
 static const struct snd_soc_dapm_widget wm8994_dac_widgets[] = {
 SND_SOC_DAPM_DAC("DAC2L", NULL, WM8994_POWER_MANAGEMENT_5, 3, 0),
-SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 2, 0),
+SND_SOC_DAPM_DAC("DAC2R", NULL, WM8994_POWER_MANAGEMENT_5, 2, 0),
 SND_SOC_DAPM_DAC("DAC1L", NULL, WM8994_POWER_MANAGEMENT_5, 1, 0),
 SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 0, 0),
 };
 
+static const struct snd_soc_dapm_widget wm8994_adc_revd_widgets[] = {
+SND_SOC_DAPM_MUX_E("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux,
+		   adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_MUX_E("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux,
+		   adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
+};
+
+static const struct snd_soc_dapm_widget wm8994_adc_widgets[] = {
+SND_SOC_DAPM_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux),
+SND_SOC_DAPM_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux),
+};
+
 static const struct snd_soc_dapm_widget wm8994_dapm_widgets[] = {
 SND_SOC_DAPM_INPUT("DMIC1DAT"),
 SND_SOC_DAPM_INPUT("DMIC2DAT"),
@@ -1497,9 +1529,6 @@ SND_SOC_DAPM_ADC("DMIC1R", NULL, WM8994_POWER_MANAGEMENT_4, 2, 0),
 SND_SOC_DAPM_ADC("ADCL", NULL, SND_SOC_NOPM, 1, 0),
 SND_SOC_DAPM_ADC("ADCR", NULL, SND_SOC_NOPM, 0, 0),
 
-SND_SOC_DAPM_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux),
-SND_SOC_DAPM_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux),
-
 SND_SOC_DAPM_MUX("Left Headphone Mux", SND_SOC_NOPM, 0, 0, &hpl_mux),
 SND_SOC_DAPM_MUX("Right Headphone Mux", SND_SOC_NOPM, 0, 0, &hpr_mux),
 
@@ -3280,11 +3309,15 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
 	if (wm8994->revision < 4) {
 		snd_soc_dapm_new_controls(dapm, wm8994_lateclk_revd_widgets,
 					  ARRAY_SIZE(wm8994_lateclk_revd_widgets));
+		snd_soc_dapm_new_controls(dapm, wm8994_adc_revd_widgets,
+					  ARRAY_SIZE(wm8994_adc_revd_widgets));
 		snd_soc_dapm_new_controls(dapm, wm8994_dac_revd_widgets,
 					  ARRAY_SIZE(wm8994_dac_revd_widgets));
 	} else {
 		snd_soc_dapm_new_controls(dapm, wm8994_lateclk_widgets,
 					  ARRAY_SIZE(wm8994_lateclk_widgets));
+		snd_soc_dapm_new_controls(dapm, wm8994_adc_widgets,
+					  ARRAY_SIZE(wm8994_adc_widgets));
 		snd_soc_dapm_new_controls(dapm, wm8994_dac_widgets,
 					  ARRAY_SIZE(wm8994_dac_widgets));
 	}
@@ -3292,6 +3325,12 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
 	case WM8958:
 		snd_soc_add_controls(codec, wm8958_snd_controls,
 				     ARRAY_SIZE(wm8958_snd_controls));
+		snd_soc_dapm_new_controls(dapm, wm8994_lateclk_widgets,
+					  ARRAY_SIZE(wm8994_lateclk_widgets));
+		snd_soc_dapm_new_controls(dapm, wm8994_adc_widgets,
+					  ARRAY_SIZE(wm8994_adc_widgets));
+		snd_soc_dapm_new_controls(dapm, wm8994_dac_widgets,
+					  ARRAY_SIZE(wm8994_dac_widgets));
 		snd_soc_dapm_new_controls(dapm, wm8958_dapm_widgets,
 					  ARRAY_SIZE(wm8958_dapm_widgets));
 		break;
@@ -3317,6 +3356,8 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
 		}
 		break;
 	case WM8958:
+		snd_soc_dapm_add_routes(dapm, wm8994_lateclk_intercon,
+					ARRAY_SIZE(wm8994_lateclk_intercon));
 		snd_soc_dapm_add_routes(dapm, wm8958_intercon,
 					ARRAY_SIZE(wm8958_intercon));
 		break;
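The wm8994 hunks above split the single aif1clk_enable/aif2clk_enable flags into separate enable and disable request bits, so the late power-down handler only acts on pending disables and can no longer consume an enable queued by a PRE_PMU event. The following is a minimal userspace model of that split, not the driver itself; the struct, the fake clk_ena field and main() are illustrative assumptions.

#include <stdio.h>

struct codec_state {
	unsigned int clk_ena;		/* stand-in for the AIF1CLK_ENA bit */
	unsigned int aif1clk_enable:1;	/* set on PRE_PMU */
	unsigned int aif1clk_disable:1;	/* set on POST_PMD */
};

/* Runs late in the power-up sequence: acts only on pending enables and
 * clears the request once applied. */
static void late_enable(struct codec_state *c)
{
	if (c->aif1clk_enable) {
		c->clk_ena = 1;
		c->aif1clk_enable = 0;
	}
}

/* Runs late in the power-down sequence: acts only on pending disables,
 * so an unrelated power-down cannot swallow a queued enable. */
static void late_disable(struct codec_state *c)
{
	if (c->aif1clk_disable) {
		c->clk_ena = 0;
		c->aif1clk_disable = 0;
	}
}

int main(void)
{
	struct codec_state c = { 0 };

	c.aif1clk_enable = 1;	/* PRE_PMU queued an enable */
	late_disable(&c);	/* power-down sequence runs first */
	late_enable(&c);
	printf("clk_ena = %u\n", c.clk_ena);	/* remains 1 */
	return 0;
}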
diff --git a/sound/soc/codecs/wm9081.c b/sound/soc/codecs/wm9081.c
index 43825b2102a5..cce704c275c6 100644
--- a/sound/soc/codecs/wm9081.c
+++ b/sound/soc/codecs/wm9081.c
@@ -15,6 +15,7 @@
 #include <linux/moduleparam.h>
 #include <linux/init.h>
 #include <linux/delay.h>
+#include <linux/device.h>
 #include <linux/pm.h>
 #include <linux/i2c.h>
 #include <linux/platform_device.h>
@@ -1341,6 +1342,10 @@ static __devinit int wm9081_i2c_probe(struct i2c_client *i2c,
 	wm9081->control_type = SND_SOC_I2C;
 	wm9081->control_data = i2c;
 
+	if (dev_get_platdata(&i2c->dev))
+		memcpy(&wm9081->retune, dev_get_platdata(&i2c->dev),
+		       sizeof(wm9081->retune));
+
 	ret = snd_soc_register_codec(&i2c->dev,
 			&soc_codec_dev_wm9081, &wm9081_dai, 1);
 	if (ret < 0)
diff --git a/sound/soc/omap/am3517evm.c b/sound/soc/omap/am3517evm.c
index 161750443ebc..73dde4a1adc3 100644
--- a/sound/soc/omap/am3517evm.c
+++ b/sound/soc/omap/am3517evm.c
@@ -139,7 +139,7 @@ static struct snd_soc_dai_link am3517evm_dai = {
 	.cpu_dai_name = "omap-mcbsp-dai.0",
 	.codec_dai_name = "tlv320aic23-hifi",
 	.platform_name = "omap-pcm-audio",
-	.codec_name = "tlv320aic23-codec",
+	.codec_name = "tlv320aic23-codec.2-001a",
 	.init = am3517evm_aic23_init,
 	.ops = &am3517evm_ops,
 };
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 25e54230cc6a..1790f83ee665 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -941,7 +941,7 @@ static void dapm_seq_run(struct snd_soc_dapm_context *dapm,
 	}
 
 	if (!list_empty(&pending))
-		dapm_seq_run_coalesced(dapm, &pending);
+		dapm_seq_run_coalesced(cur_dapm, &pending);
 }
 
 static void dapm_widget_update(struct snd_soc_dapm_context *dapm)
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index f6a929e74981..0866bcdb5e8e 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -270,11 +270,15 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
 			  const char *name, bool is_kallsyms)
 {
 	const size_t size = PATH_MAX;
-	char *realname = realpath(name, NULL),
-	     *filename = malloc(size),
+	char *realname, *filename = malloc(size),
 	     *linkname = malloc(size), *targetname;
 	int len, err = -1;
 
+	if (is_kallsyms)
+		realname = (char *)name;
+	else
+		realname = realpath(name, NULL);
+
 	if (realname == NULL || filename == NULL || linkname == NULL)
 		goto out_free;
 
@@ -306,7 +310,8 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
 	if (symlink(targetname, linkname) == 0)
 		err = 0;
 out_free:
-	free(realname);
+	if (!is_kallsyms)
+		free(realname);
 	free(filename);
 	free(linkname);
 	return err;
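The header.c hunks change the ownership of realname: in the kallsyms case it now borrows the caller's name instead of calling realpath(), so the cleanup path frees it only in the non-kallsyms case. Below is a standalone sketch of that ownership rule, assuming an illustrative use_name() wrapper rather than the real build_id_cache__add_s().

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* "realname" either borrows the caller's string (is_kallsyms) or owns
 * memory returned by realpath(); it is freed only when owned. */
static int use_name(const char *name, bool is_kallsyms)
{
	char *realname;
	int err = -1;

	if (is_kallsyms)
		realname = (char *)name;		/* borrowed, not owned */
	else
		realname = realpath(name, NULL);	/* malloc'd by libc */

	if (realname == NULL)
		goto out_free;

	printf("using %s\n", realname);
	err = 0;
out_free:
	if (!is_kallsyms)
		free(realname);		/* never free the borrowed pointer */
	return err;
}

int main(void)
{
	use_name("/proc/kallsyms", true);
	use_name("/tmp", false);
	return 0;
}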
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 7821d0e6866f..b1bf490aff88 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1836,7 +1836,7 @@ int dso__load_vmlinux(struct dso *self, struct map *map,
 	int err = -1, fd;
 	char symfs_vmlinux[PATH_MAX];
 
-	snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s/%s",
+	snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s%s",
 		 symbol_conf.symfs, vmlinux);
 	fd = open(symfs_vmlinux, O_RDONLY);
 	if (fd < 0)