aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/00-INDEX4
-rw-r--r--Documentation/devicetree/bindings/pinctrl/nvidia,tegra20-pinmux.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/nvidia,tegra30-pinmux.txt2
-rw-r--r--MAINTAINERS1
-rw-r--r--arch/alpha/include/asm/thread_info.h80
-rw-r--r--arch/alpha/kernel/osf_sys.c25
-rw-r--r--arch/alpha/kernel/process.c2
-rw-r--r--arch/alpha/kernel/traps.c6
-rw-r--r--arch/arm/Kconfig9
-rw-r--r--arch/arm/Makefile13
-rw-r--r--arch/arm/boot/Makefile10
-rw-r--r--arch/arm/include/asm/flat.h2
-rw-r--r--arch/arm/include/asm/uaccess.h4
-rw-r--r--arch/arm/include/asm/xen/interface.h12
-rw-r--r--arch/arm/include/asm/xen/page.h13
-rw-r--r--arch/arm/kernel/irq.c2
-rw-r--r--arch/arm/kernel/kprobes-test-arm.c4
-rw-r--r--arch/arm/kernel/machine_kexec.c7
-rw-r--r--arch/arm/kernel/perf_event.c4
-rw-r--r--arch/arm/kernel/smp.c14
-rw-r--r--arch/arm/kernel/smp_twd.c4
-rw-r--r--arch/arm/lib/delay.c1
-rw-r--r--arch/arm/mm/alignment.c4
-rw-r--r--arch/arm/mm/dma-mapping.c2
-rw-r--r--arch/arm/mm/vmregion.h1
-rw-r--r--arch/arm/tools/Makefile2
-rw-r--r--arch/arm/xen/grant-table.c2
-rw-r--r--arch/ia64/include/asm/Kbuild1
-rw-r--r--arch/ia64/include/asm/kvm_para.h34
-rw-r--r--arch/m68k/include/asm/Kbuild2
-rw-r--r--arch/m68k/include/asm/ptrace.h75
-rw-r--r--arch/m68k/include/asm/setup.h82
-rw-r--r--arch/m68k/include/asm/signal.h118
-rw-r--r--arch/m68k/include/asm/termios.h44
-rw-r--r--arch/m68k/include/asm/unistd.h356
-rw-r--r--arch/m68k/include/uapi/asm/Kbuild23
-rw-r--r--arch/m68k/include/uapi/asm/a.out.h (renamed from arch/m68k/include/asm/a.out.h)0
-rw-r--r--arch/m68k/include/uapi/asm/auxvec.h (renamed from arch/m68k/include/asm/auxvec.h)0
-rw-r--r--arch/m68k/include/uapi/asm/byteorder.h (renamed from arch/m68k/include/asm/byteorder.h)0
-rw-r--r--arch/m68k/include/uapi/asm/cachectl.h (renamed from arch/m68k/include/asm/cachectl.h)0
-rw-r--r--arch/m68k/include/uapi/asm/fcntl.h (renamed from arch/m68k/include/asm/fcntl.h)0
-rw-r--r--arch/m68k/include/uapi/asm/ioctls.h (renamed from arch/m68k/include/asm/ioctls.h)0
-rw-r--r--arch/m68k/include/uapi/asm/msgbuf.h (renamed from arch/m68k/include/asm/msgbuf.h)0
-rw-r--r--arch/m68k/include/uapi/asm/param.h (renamed from arch/m68k/include/asm/param.h)0
-rw-r--r--arch/m68k/include/uapi/asm/poll.h (renamed from arch/m68k/include/asm/poll.h)0
-rw-r--r--arch/m68k/include/uapi/asm/posix_types.h (renamed from arch/m68k/include/asm/posix_types.h)0
-rw-r--r--arch/m68k/include/uapi/asm/ptrace.h79
-rw-r--r--arch/m68k/include/uapi/asm/sembuf.h (renamed from arch/m68k/include/asm/sembuf.h)0
-rw-r--r--arch/m68k/include/uapi/asm/setup.h103
-rw-r--r--arch/m68k/include/uapi/asm/shmbuf.h (renamed from arch/m68k/include/asm/shmbuf.h)0
-rw-r--r--arch/m68k/include/uapi/asm/sigcontext.h (renamed from arch/m68k/include/asm/sigcontext.h)0
-rw-r--r--arch/m68k/include/uapi/asm/signal.h118
-rw-r--r--arch/m68k/include/uapi/asm/socket.h (renamed from arch/m68k/include/asm/socket.h)0
-rw-r--r--arch/m68k/include/uapi/asm/sockios.h (renamed from arch/m68k/include/asm/sockios.h)0
-rw-r--r--arch/m68k/include/uapi/asm/stat.h (renamed from arch/m68k/include/asm/stat.h)0
-rw-r--r--arch/m68k/include/uapi/asm/swab.h (renamed from arch/m68k/include/asm/swab.h)0
-rw-r--r--arch/m68k/include/uapi/asm/termbits.h (renamed from arch/m68k/include/asm/termbits.h)0
-rw-r--r--arch/m68k/include/uapi/asm/termios.h44
-rw-r--r--arch/m68k/include/uapi/asm/unistd.h357
-rw-r--r--arch/m68k/kernel/syscalltable.S1
-rw-r--r--arch/s390/Kconfig1
-rw-r--r--arch/s390/boot/compressed/vmlinux.lds.S2
-rw-r--r--arch/s390/include/asm/perf_event.h2
-rw-r--r--arch/s390/include/uapi/asm/Kbuild2
-rw-r--r--arch/s390/include/uapi/asm/chpid.h10
-rw-r--r--arch/s390/include/uapi/asm/kvm_para.h11
-rw-r--r--arch/s390/kernel/cache.c9
-rw-r--r--arch/s390/kernel/head_kdump.S10
-rw-r--r--arch/s390/kernel/perf_cpum_cf.c6
-rw-r--r--arch/s390/kernel/vmlinux.lds.S2
-rw-r--r--arch/tile/Makefile4
-rw-r--r--arch/tile/kernel/module.c10
-rw-r--r--arch/x86/include/asm/efi.h6
-rw-r--r--arch/x86/include/asm/xen/interface.h4
-rw-r--r--arch/x86/kernel/apic/io_apic.c3
-rw-r--r--arch/x86/kernel/cpu/perf_event.c10
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.c48
-rw-r--r--arch/x86/kernel/cpu/perf_event_knc.c93
-rw-r--r--arch/x86/kernel/cpu/perf_event_p6.c127
-rw-r--r--arch/x86/kernel/e820.c3
-rw-r--r--arch/x86/kernel/entry_32.S8
-rw-r--r--arch/x86/kernel/entry_64.S2
-rw-r--r--arch/x86/kernel/kvm.c3
-rw-r--r--arch/x86/kernel/setup.c27
-rw-r--r--arch/x86/kernel/signal.c4
-rw-r--r--arch/x86/kernel/uprobes.c16
-rw-r--r--arch/x86/kvm/lapic.c2
-rw-r--r--arch/x86/kvm/mmu.c3
-rw-r--r--arch/x86/mm/init.c58
-rw-r--r--arch/x86/mm/init_64.c7
-rw-r--r--arch/x86/oprofile/nmi_int.c2
-rw-r--r--arch/x86/platform/efi/efi.c47
-rw-r--r--arch/x86/platform/efi/efi_64.c7
-rw-r--r--arch/x86/xen/enlighten.c2
-rw-r--r--drivers/base/Kconfig2
-rw-r--r--drivers/base/dma-coherent.c5
-rw-r--r--drivers/base/dma-contiguous.c5
-rw-r--r--drivers/base/regmap/Kconfig2
-rw-r--r--drivers/dma/Kconfig11
-rw-r--r--drivers/dma/dw_dmac_regs.h18
-rw-r--r--drivers/edac/amd64_edac.c11
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c4
-rw-r--r--drivers/gpu/drm/drm_info.c2
-rw-r--r--drivers/gpu/drm/drm_platform.c1
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c13
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h1
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c7
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c15
-rw-r--r--drivers/gpu/drm/i915/intel_display.c32
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c15
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c8
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c14
-rw-r--r--drivers/gpu/drm/nouveau/core/core/gpuobj.c5
-rw-r--r--drivers/gpu/drm/nouveau/core/core/mm.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/base.c30
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/pll.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c14
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c5
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c1
-rw-r--r--drivers/gpu/drm/radeon/ni.c45
-rw-r--r--drivers/gpu/drm/radeon/nid.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c60
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c22
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c19
-rw-r--r--drivers/gpu/drm/radeon/si.c47
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c12
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c24
-rw-r--r--drivers/input/evdev.c3
-rw-r--r--drivers/input/joydev.c3
-rw-r--r--drivers/input/mousedev.c3
-rw-r--r--drivers/input/tablet/wacom_sys.c51
-rw-r--r--drivers/input/tablet/wacom_wac.c88
-rw-r--r--drivers/input/tablet/wacom_wac.h5
-rw-r--r--drivers/iommu/amd_iommu_init.c39
-rw-r--r--drivers/iommu/tegra-smmu.c2
-rw-r--r--drivers/pinctrl/core.c4
-rw-r--r--drivers/pinctrl/pinconf.c4
-rw-r--r--drivers/pinctrl/pinctrl-nomadik.c6
-rw-r--r--drivers/pinctrl/pinctrl-tegra.c2
-rw-r--r--drivers/pinctrl/pinctrl-tegra30.c24
-rw-r--r--drivers/rtc/rtc-imxdi.c2
-rw-r--r--drivers/s390/cio/css.c7
-rw-r--r--drivers/s390/cio/idset.c26
-rw-r--r--drivers/s390/cio/idset.h3
-rw-r--r--drivers/spi/spi-pl022.c3
-rw-r--r--drivers/spi/spi-rspi.c56
-rw-r--r--drivers/video/backlight/Kconfig3
-rw-r--r--drivers/xen/Kconfig2
-rw-r--r--drivers/xen/balloon.c3
-rw-r--r--drivers/xen/dbgp.c2
-rw-r--r--drivers/xen/events.c4
-rw-r--r--drivers/xen/grant-table.c8
-rw-r--r--drivers/xen/sys-hypervisor.c4
-rw-r--r--drivers/xen/xen-pciback/vpci.c14
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c2
-rw-r--r--fs/btrfs/backref.c28
-rw-r--r--fs/btrfs/backref.h4
-rw-r--r--fs/btrfs/ctree.c70
-rw-r--r--fs/btrfs/ctree.h3
-rw-r--r--fs/btrfs/extent_io.c4
-rw-r--r--fs/btrfs/inode.c7
-rw-r--r--fs/btrfs/ioctl.c6
-rw-r--r--fs/btrfs/qgroup.c17
-rw-r--r--fs/btrfs/send.c156
-rw-r--r--fs/btrfs/transaction.c2
-rw-r--r--fs/btrfs/volumes.c7
-rw-r--r--fs/char_dev.c18
-rw-r--r--fs/compat_ioctl.c2
-rw-r--r--fs/ext4/balloc.c8
-rw-r--r--fs/ext4/bitmap.c6
-rw-r--r--fs/ext4/ext4.h7
-rw-r--r--fs/ext4/ext4_jbd2.c8
-rw-r--r--fs/ext4/extents.c60
-rw-r--r--fs/ext4/ialloc.c4
-rw-r--r--fs/ext4/mballoc.c14
-rw-r--r--fs/ext4/resize.c3
-rw-r--r--fs/ext4/super.c9
-rw-r--r--fs/jfs/jfs_discard.c16
-rw-r--r--fs/lockd/mon.c57
-rw-r--r--fs/namei.c4
-rw-r--r--fs/nfs/callback.c2
-rw-r--r--fs/nfs/nfs4filelayout.c21
-rw-r--r--fs/nfs/nfs4filelayout.h1
-rw-r--r--fs/nfs/nfs4filelayoutdev.c22
-rw-r--r--fs/nfs/nfs4getroot.c1
-rw-r--r--fs/nfs/objlayout/objio_osd.c6
-rw-r--r--fs/nfs/pnfs.h1
-rw-r--r--fs/proc/stat.c14
-rw-r--r--include/drm/drm_pciids.h3
-rw-r--r--include/linux/memblock.h1
-rw-r--r--include/linux/perf_event.h10
-rw-r--r--include/linux/rbtree_augmented.h1
-rw-r--r--include/linux/spi/tsc2005.h2
-rw-r--r--include/linux/uprobes.h11
-rw-r--r--include/xen/grant_table.h2
-rw-r--r--include/xen/interface/grant_table.h2
-rw-r--r--include/xen/interface/memory.h24
-rw-r--r--kernel/Makefile6
-rw-r--r--kernel/cgroup.c41
-rw-r--r--kernel/events/uprobes.c345
-rw-r--r--kernel/module_signing.c2
-rw-r--r--kernel/pid_namespace.c12
-rw-r--r--kernel/trace/ring_buffer.c4
-rw-r--r--kernel/workqueue.c2
-rw-r--r--lib/genalloc.c2
-rw-r--r--mm/memblock.c24
-rw-r--r--mm/mmu_notifier.c26
-rw-r--r--mm/page_alloc.c6
-rw-r--r--mm/rmap.c20
-rw-r--r--net/sunrpc/xprtsock.c41
-rw-r--r--security/apparmor/policy.c24
-rw-r--r--security/device_cgroup.c87
-rw-r--r--sound/isa/opti9xx/miro.c1
-rw-r--r--sound/pci/als300.c4
-rw-r--r--sound/pci/hda/patch_realtek.c20
-rw-r--r--sound/pci/rme9652/hdspm.c234
-rw-r--r--sound/soc/codecs/wm8994.c18
-rw-r--r--sound/soc/codecs/wm8994.h1
-rw-r--r--sound/soc/ux500/mop500.c17
-rw-r--r--sound/soc/ux500/ux500_msp_i2s.c8
-rw-r--r--tools/perf/builtin-help.c2
-rw-r--r--tools/perf/builtin-trace.c18
-rw-r--r--tools/perf/util/parse-events-test.c12
-rw-r--r--tools/perf/util/thread.c1
-rw-r--r--tools/testing/selftests/epoll/test_epoll.c4
-rw-r--r--tools/vm/page-types.c2
-rw-r--r--usr/gen_init_cpio.c43
-rw-r--r--virt/kvm/kvm_main.c4
234 files changed, 2817 insertions, 1950 deletions
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index f54273e2ac97..ceb1ff735469 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -210,6 +210,8 @@ local_ops.txt
210 - semantics and behavior of local atomic operations. 210 - semantics and behavior of local atomic operations.
211lockdep-design.txt 211lockdep-design.txt
212 - documentation on the runtime locking correctness validator. 212 - documentation on the runtime locking correctness validator.
213lockup-watchdogs.txt
214 - info on soft and hard lockup detectors (aka nmi_watchdog).
213logo.gif 215logo.gif
214 - full colour GIF image of Linux logo (penguin - Tux). 216 - full colour GIF image of Linux logo (penguin - Tux).
215logo.txt 217logo.txt
@@ -240,8 +242,6 @@ netlabel/
240 - directory with information on the NetLabel subsystem. 242 - directory with information on the NetLabel subsystem.
241networking/ 243networking/
242 - directory with info on various aspects of networking with Linux. 244 - directory with info on various aspects of networking with Linux.
243nmi_watchdog.txt
244 - info on NMI watchdog for SMP systems.
245nommu-mmap.txt 245nommu-mmap.txt
246 - documentation about no-mmu memory mapping support. 246 - documentation about no-mmu memory mapping support.
247numastat.txt 247numastat.txt
diff --git a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra20-pinmux.txt b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra20-pinmux.txt
index c8e578263ce2..683fde93c4fb 100644
--- a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra20-pinmux.txt
+++ b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra20-pinmux.txt
@@ -93,7 +93,7 @@ Valid values for pin and group names are:
93 93
94 With some exceptions, these support nvidia,high-speed-mode, 94 With some exceptions, these support nvidia,high-speed-mode,
95 nvidia,schmitt, nvidia,low-power-mode, nvidia,pull-down-strength, 95 nvidia,schmitt, nvidia,low-power-mode, nvidia,pull-down-strength,
96 nvidia,pull-up-strength, nvidia,slew_rate-rising, nvidia,slew_rate-falling. 96 nvidia,pull-up-strength, nvidia,slew-rate-rising, nvidia,slew-rate-falling.
97 97
98 drive_ao1, drive_ao2, drive_at1, drive_at2, drive_cdev1, drive_cdev2, 98 drive_ao1, drive_ao2, drive_at1, drive_at2, drive_cdev1, drive_cdev2,
99 drive_csus, drive_dap1, drive_dap2, drive_dap3, drive_dap4, drive_dbg, 99 drive_csus, drive_dap1, drive_dap2, drive_dap3, drive_dap4, drive_dbg,
diff --git a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra30-pinmux.txt b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra30-pinmux.txt
index c275b70349c1..6f426ed7009e 100644
--- a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra30-pinmux.txt
+++ b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra30-pinmux.txt
@@ -83,7 +83,7 @@ Valid values for pin and group names are:
83 drive groups: 83 drive groups:
84 84
85 These all support nvidia,pull-down-strength, nvidia,pull-up-strength, 85 These all support nvidia,pull-down-strength, nvidia,pull-up-strength,
86 nvidia,slew_rate-rising, nvidia,slew_rate-falling. Most but not all 86 nvidia,slew-rate-rising, nvidia,slew-rate-falling. Most but not all
87 support nvidia,high-speed-mode, nvidia,schmitt, nvidia,low-power-mode. 87 support nvidia,high-speed-mode, nvidia,schmitt, nvidia,low-power-mode.
88 88
89 ao1, ao2, at1, at2, at3, at4, at5, cdev1, cdev2, cec, crt, csus, dap1, 89 ao1, ao2, at1, at2, at3, at4, at5, cdev1, cdev2, cec, crt, csus, dap1,
diff --git a/MAINTAINERS b/MAINTAINERS
index 027ec2bfa135..f39a82dc0260 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2802,6 +2802,7 @@ F: sound/usb/misc/ua101.c
2802EXTENSIBLE FIRMWARE INTERFACE (EFI) 2802EXTENSIBLE FIRMWARE INTERFACE (EFI)
2803M: Matt Fleming <matt.fleming@intel.com> 2803M: Matt Fleming <matt.fleming@intel.com>
2804L: linux-efi@vger.kernel.org 2804L: linux-efi@vger.kernel.org
2805T: git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git
2805S: Maintained 2806S: Maintained
2806F: Documentation/x86/efi-stub.txt 2807F: Documentation/x86/efi-stub.txt
2807F: arch/ia64/kernel/efi.c 2808F: arch/ia64/kernel/efi.c
diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h
index 4554ecbff7c6..1f8c72959fb6 100644
--- a/arch/alpha/include/asm/thread_info.h
+++ b/arch/alpha/include/asm/thread_info.h
@@ -7,6 +7,7 @@
7#include <asm/processor.h> 7#include <asm/processor.h>
8#include <asm/types.h> 8#include <asm/types.h>
9#include <asm/hwrpb.h> 9#include <asm/hwrpb.h>
10#include <asm/sysinfo.h>
10#endif 11#endif
11 12
12#ifndef __ASSEMBLY__ 13#ifndef __ASSEMBLY__
@@ -21,6 +22,7 @@ struct thread_info {
21 mm_segment_t addr_limit; /* thread address space */ 22 mm_segment_t addr_limit; /* thread address space */
22 unsigned cpu; /* current CPU */ 23 unsigned cpu; /* current CPU */
23 int preempt_count; /* 0 => preemptable, <0 => BUG */ 24 int preempt_count; /* 0 => preemptable, <0 => BUG */
25 unsigned int status; /* thread-synchronous flags */
24 26
25 int bpt_nsaved; 27 int bpt_nsaved;
26 unsigned long bpt_addr[2]; /* breakpoint handling */ 28 unsigned long bpt_addr[2]; /* breakpoint handling */
@@ -63,8 +65,6 @@ register struct thread_info *__current_thread_info __asm__("$8");
63 * - these are process state flags and used from assembly 65 * - these are process state flags and used from assembly
64 * - pending work-to-be-done flags come first and must be assigned to be 66 * - pending work-to-be-done flags come first and must be assigned to be
65 * within bits 0 to 7 to fit in and immediate operand. 67 * within bits 0 to 7 to fit in and immediate operand.
66 * - ALPHA_UAC_SHIFT below must be kept consistent with the unaligned
67 * control flags.
68 * 68 *
69 * TIF_SYSCALL_TRACE is known to be 0 via blbs. 69 * TIF_SYSCALL_TRACE is known to be 0 via blbs.
70 */ 70 */
@@ -72,18 +72,12 @@ register struct thread_info *__current_thread_info __asm__("$8");
72#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */ 72#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
73#define TIF_SIGPENDING 2 /* signal pending */ 73#define TIF_SIGPENDING 2 /* signal pending */
74#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ 74#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
75#define TIF_POLLING_NRFLAG 8 /* poll_idle is polling NEED_RESCHED */
76#define TIF_DIE_IF_KERNEL 9 /* dik recursion lock */ 75#define TIF_DIE_IF_KERNEL 9 /* dik recursion lock */
77#define TIF_UAC_NOPRINT 10 /* ! Preserve sequence of following */
78#define TIF_UAC_NOFIX 11 /* ! flags as they match */
79#define TIF_UAC_SIGBUS 12 /* ! userspace part of 'osf_sysinfo' */
80#define TIF_MEMDIE 13 /* is terminating due to OOM killer */ 76#define TIF_MEMDIE 13 /* is terminating due to OOM killer */
81#define TIF_RESTORE_SIGMASK 14 /* restore signal mask in do_signal */
82 77
83#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 78#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
84#define _TIF_SIGPENDING (1<<TIF_SIGPENDING) 79#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
85#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) 80#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
86#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
87#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) 81#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
88 82
89/* Work to do on interrupt/exception return. */ 83/* Work to do on interrupt/exception return. */
@@ -94,29 +88,63 @@ register struct thread_info *__current_thread_info __asm__("$8");
94#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK \ 88#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK \
95 | _TIF_SYSCALL_TRACE) 89 | _TIF_SYSCALL_TRACE)
96 90
97#define ALPHA_UAC_SHIFT TIF_UAC_NOPRINT 91#define TS_UAC_NOPRINT 0x0001 /* ! Preserve the following three */
98#define ALPHA_UAC_MASK (1 << TIF_UAC_NOPRINT | 1 << TIF_UAC_NOFIX | \ 92#define TS_UAC_NOFIX 0x0002 /* ! flags as they match */
99 1 << TIF_UAC_SIGBUS) 93#define TS_UAC_SIGBUS 0x0004 /* ! userspace part of 'osf_sysinfo' */
94#define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */
95#define TS_POLLING 0x0010 /* idle task polling need_resched,
96 skip sending interrupt */
100 97
101#define SET_UNALIGN_CTL(task,value) ({ \ 98#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
102 task_thread_info(task)->flags = ((task_thread_info(task)->flags & \ 99
103 ~ALPHA_UAC_MASK) \ 100#ifndef __ASSEMBLY__
104 | (((value) << ALPHA_UAC_SHIFT) & (1<<TIF_UAC_NOPRINT))\ 101#define HAVE_SET_RESTORE_SIGMASK 1
105 | (((value) << (ALPHA_UAC_SHIFT + 1)) & (1<<TIF_UAC_SIGBUS)) \ 102static inline void set_restore_sigmask(void)
106 | (((value) << (ALPHA_UAC_SHIFT - 1)) & (1<<TIF_UAC_NOFIX)));\ 103{
104 struct thread_info *ti = current_thread_info();
105 ti->status |= TS_RESTORE_SIGMASK;
106 WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags));
107}
108static inline void clear_restore_sigmask(void)
109{
110 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
111}
112static inline bool test_restore_sigmask(void)
113{
114 return current_thread_info()->status & TS_RESTORE_SIGMASK;
115}
116static inline bool test_and_clear_restore_sigmask(void)
117{
118 struct thread_info *ti = current_thread_info();
119 if (!(ti->status & TS_RESTORE_SIGMASK))
120 return false;
121 ti->status &= ~TS_RESTORE_SIGMASK;
122 return true;
123}
124#endif
125
126#define SET_UNALIGN_CTL(task,value) ({ \
127 __u32 status = task_thread_info(task)->status & ~UAC_BITMASK; \
128 if (value & PR_UNALIGN_NOPRINT) \
129 status |= TS_UAC_NOPRINT; \
130 if (value & PR_UNALIGN_SIGBUS) \
131 status |= TS_UAC_SIGBUS; \
132 if (value & 4) /* alpha-specific */ \
133 status |= TS_UAC_NOFIX; \
134 task_thread_info(task)->status = status; \
107 0; }) 135 0; })
108 136
109#define GET_UNALIGN_CTL(task,value) ({ \ 137#define GET_UNALIGN_CTL(task,value) ({ \
110 put_user((task_thread_info(task)->flags & (1 << TIF_UAC_NOPRINT))\ 138 __u32 status = task_thread_info(task)->status & ~UAC_BITMASK; \
111 >> ALPHA_UAC_SHIFT \ 139 __u32 res = 0; \
112 | (task_thread_info(task)->flags & (1 << TIF_UAC_SIGBUS))\ 140 if (status & TS_UAC_NOPRINT) \
113 >> (ALPHA_UAC_SHIFT + 1) \ 141 res |= PR_UNALIGN_NOPRINT; \
114 | (task_thread_info(task)->flags & (1 << TIF_UAC_NOFIX))\ 142 if (status & TS_UAC_SIGBUS) \
115 >> (ALPHA_UAC_SHIFT - 1), \ 143 res |= PR_UNALIGN_SIGBUS; \
116 (int __user *)(value)); \ 144 if (status & TS_UAC_NOFIX) \
145 res |= 4; \
146 put_user(res, (int __user *)(value)); \
117 }) 147 })
118 148
119#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
120
121#endif /* __KERNEL__ */ 149#endif /* __KERNEL__ */
122#endif /* _ALPHA_THREAD_INFO_H */ 150#endif /* _ALPHA_THREAD_INFO_H */
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 9eb090582cf1..1e6956a90608 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -793,8 +793,7 @@ SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer,
793 case GSI_UACPROC: 793 case GSI_UACPROC:
794 if (nbytes < sizeof(unsigned int)) 794 if (nbytes < sizeof(unsigned int))
795 return -EINVAL; 795 return -EINVAL;
796 w = (current_thread_info()->flags >> ALPHA_UAC_SHIFT) & 796 w = current_thread_info()->status & UAC_BITMASK;
797 UAC_BITMASK;
798 if (put_user(w, (unsigned int __user *)buffer)) 797 if (put_user(w, (unsigned int __user *)buffer))
799 return -EFAULT; 798 return -EFAULT;
800 return 1; 799 return 1;
@@ -904,24 +903,20 @@ SYSCALL_DEFINE5(osf_setsysinfo, unsigned long, op, void __user *, buffer,
904 break; 903 break;
905 904
906 case SSI_NVPAIRS: { 905 case SSI_NVPAIRS: {
907 unsigned long v, w, i; 906 unsigned __user *p = buffer;
908 unsigned int old, new; 907 unsigned i;
909 908
910 for (i = 0; i < nbytes; ++i) { 909 for (i = 0, p = buffer; i < nbytes; ++i, p += 2) {
910 unsigned v, w, status;
911 911
912 if (get_user(v, 2*i + (unsigned int __user *)buffer)) 912 if (get_user(v, p) || get_user(w, p + 1))
913 return -EFAULT;
914 if (get_user(w, 2*i + 1 + (unsigned int __user *)buffer))
915 return -EFAULT; 913 return -EFAULT;
916 switch (v) { 914 switch (v) {
917 case SSIN_UACPROC: 915 case SSIN_UACPROC:
918 again: 916 w &= UAC_BITMASK;
919 old = current_thread_info()->flags; 917 status = current_thread_info()->status;
920 new = old & ~(UAC_BITMASK << ALPHA_UAC_SHIFT); 918 status = (status & ~UAC_BITMASK) | w;
921 new = new | (w & UAC_BITMASK) << ALPHA_UAC_SHIFT; 919 current_thread_info()->status = status;
922 if (cmpxchg(&current_thread_info()->flags,
923 old, new) != old)
924 goto again;
925 break; 920 break;
926 921
927 default: 922 default:
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 4054e0ffe2b2..51987dcf79b8 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -49,7 +49,7 @@ EXPORT_SYMBOL(pm_power_off);
49void 49void
50cpu_idle(void) 50cpu_idle(void)
51{ 51{
52 set_thread_flag(TIF_POLLING_NRFLAG); 52 current_thread_info()->status |= TS_POLLING;
53 53
54 while (1) { 54 while (1) {
55 /* FIXME -- EV6 and LCA45 know how to power down 55 /* FIXME -- EV6 and LCA45 know how to power down
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
index 80d987c0e9aa..272666d006df 100644
--- a/arch/alpha/kernel/traps.c
+++ b/arch/alpha/kernel/traps.c
@@ -780,17 +780,17 @@ do_entUnaUser(void __user * va, unsigned long opcode,
780 /* Check the UAC bits to decide what the user wants us to do 780 /* Check the UAC bits to decide what the user wants us to do
781 with the unaliged access. */ 781 with the unaliged access. */
782 782
783 if (!test_thread_flag (TIF_UAC_NOPRINT)) { 783 if (!(current_thread_info()->status & TS_UAC_NOPRINT)) {
784 if (__ratelimit(&ratelimit)) { 784 if (__ratelimit(&ratelimit)) {
785 printk("%s(%d): unaligned trap at %016lx: %p %lx %ld\n", 785 printk("%s(%d): unaligned trap at %016lx: %p %lx %ld\n",
786 current->comm, task_pid_nr(current), 786 current->comm, task_pid_nr(current),
787 regs->pc - 4, va, opcode, reg); 787 regs->pc - 4, va, opcode, reg);
788 } 788 }
789 } 789 }
790 if (test_thread_flag (TIF_UAC_SIGBUS)) 790 if ((current_thread_info()->status & TS_UAC_SIGBUS))
791 goto give_sigbus; 791 goto give_sigbus;
792 /* Not sure why you'd want to use this, but... */ 792 /* Not sure why you'd want to use this, but... */
793 if (test_thread_flag (TIF_UAC_NOFIX)) 793 if ((current_thread_info()->status & TS_UAC_NOFIX))
794 return; 794 return;
795 795
796 /* Don't bother reading ds in the access check since we already 796 /* Don't bother reading ds in the access check since we already
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 73067efd4845..ade7e924bef5 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1603,8 +1603,8 @@ config NR_CPUS
1603 default "4" 1603 default "4"
1604 1604
1605config HOTPLUG_CPU 1605config HOTPLUG_CPU
1606 bool "Support for hot-pluggable CPUs (EXPERIMENTAL)" 1606 bool "Support for hot-pluggable CPUs"
1607 depends on SMP && HOTPLUG && EXPERIMENTAL 1607 depends on SMP && HOTPLUG
1608 help 1608 help
1609 Say Y here to experiment with turning CPUs off and on. CPUs 1609 Say Y here to experiment with turning CPUs off and on. CPUs
1610 can be controlled through /sys/devices/system/cpu. 1610 can be controlled through /sys/devices/system/cpu.
@@ -1645,8 +1645,8 @@ config HZ
1645 default 100 1645 default 100
1646 1646
1647config THUMB2_KERNEL 1647config THUMB2_KERNEL
1648 bool "Compile the kernel in Thumb-2 mode (EXPERIMENTAL)" 1648 bool "Compile the kernel in Thumb-2 mode"
1649 depends on CPU_V7 && !CPU_V6 && !CPU_V6K && EXPERIMENTAL 1649 depends on CPU_V7 && !CPU_V6 && !CPU_V6K
1650 select AEABI 1650 select AEABI
1651 select ARM_ASM_UNIFIED 1651 select ARM_ASM_UNIFIED
1652 select ARM_UNWIND 1652 select ARM_UNWIND
@@ -1850,6 +1850,7 @@ config XEN_DOM0
1850config XEN 1850config XEN
1851 bool "Xen guest support on ARM (EXPERIMENTAL)" 1851 bool "Xen guest support on ARM (EXPERIMENTAL)"
1852 depends on EXPERIMENTAL && ARM && OF 1852 depends on EXPERIMENTAL && ARM && OF
1853 depends on CPU_V7 && !CPU_V6
1853 help 1854 help
1854 Say Y if you want to run Linux in a Virtual Machine on Xen on ARM. 1855 Say Y if you want to run Linux in a Virtual Machine on Xen on ARM.
1855 1856
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index f023e3acdfbd..5f914fca911b 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -21,8 +21,6 @@ endif
21OBJCOPYFLAGS :=-O binary -R .comment -S 21OBJCOPYFLAGS :=-O binary -R .comment -S
22GZFLAGS :=-9 22GZFLAGS :=-9
23#KBUILD_CFLAGS +=-pipe 23#KBUILD_CFLAGS +=-pipe
24# Explicitly specifiy 32-bit ARM ISA since toolchain default can be -mthumb:
25KBUILD_CFLAGS +=$(call cc-option,-marm,)
26 24
27# Never generate .eh_frame 25# Never generate .eh_frame
28KBUILD_CFLAGS += $(call cc-option,-fno-dwarf2-cfi-asm) 26KBUILD_CFLAGS += $(call cc-option,-fno-dwarf2-cfi-asm)
@@ -105,17 +103,20 @@ endif
105ifeq ($(CONFIG_THUMB2_KERNEL),y) 103ifeq ($(CONFIG_THUMB2_KERNEL),y)
106AFLAGS_AUTOIT :=$(call as-option,-Wa$(comma)-mimplicit-it=always,-Wa$(comma)-mauto-it) 104AFLAGS_AUTOIT :=$(call as-option,-Wa$(comma)-mimplicit-it=always,-Wa$(comma)-mauto-it)
107AFLAGS_NOWARN :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W) 105AFLAGS_NOWARN :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W)
108CFLAGS_THUMB2 :=-mthumb $(AFLAGS_AUTOIT) $(AFLAGS_NOWARN) 106CFLAGS_ISA :=-mthumb $(AFLAGS_AUTOIT) $(AFLAGS_NOWARN)
109AFLAGS_THUMB2 :=$(CFLAGS_THUMB2) -Wa$(comma)-mthumb 107AFLAGS_ISA :=$(CFLAGS_ISA) -Wa$(comma)-mthumb
110# Work around buggy relocation from gas if requested: 108# Work around buggy relocation from gas if requested:
111ifeq ($(CONFIG_THUMB2_AVOID_R_ARM_THM_JUMP11),y) 109ifeq ($(CONFIG_THUMB2_AVOID_R_ARM_THM_JUMP11),y)
112CFLAGS_MODULE +=-fno-optimize-sibling-calls 110CFLAGS_MODULE +=-fno-optimize-sibling-calls
113endif 111endif
112else
113CFLAGS_ISA :=$(call cc-option,-marm,)
114AFLAGS_ISA :=$(CFLAGS_ISA)
114endif 115endif
115 116
116# Need -Uarm for gcc < 3.x 117# Need -Uarm for gcc < 3.x
117KBUILD_CFLAGS +=$(CFLAGS_ABI) $(CFLAGS_THUMB2) $(arch-y) $(tune-y) $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,)) -msoft-float -Uarm 118KBUILD_CFLAGS +=$(CFLAGS_ABI) $(CFLAGS_ISA) $(arch-y) $(tune-y) $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,)) -msoft-float -Uarm
118KBUILD_AFLAGS +=$(CFLAGS_ABI) $(AFLAGS_THUMB2) $(arch-y) $(tune-y) -include asm/unified.h -msoft-float 119KBUILD_AFLAGS +=$(CFLAGS_ABI) $(AFLAGS_ISA) $(arch-y) $(tune-y) -include asm/unified.h -msoft-float
119 120
120CHECKFLAGS += -D__arm__ 121CHECKFLAGS += -D__arm__
121 122
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index 3fdab016aa5c..f2aa09eb658e 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -33,7 +33,7 @@ ifeq ($(CONFIG_XIP_KERNEL),y)
33 33
34$(obj)/xipImage: vmlinux FORCE 34$(obj)/xipImage: vmlinux FORCE
35 $(call if_changed,objcopy) 35 $(call if_changed,objcopy)
36 @echo ' Kernel: $@ is ready (physical address: $(CONFIG_XIP_PHYS_ADDR))' 36 $(kecho) ' Kernel: $@ is ready (physical address: $(CONFIG_XIP_PHYS_ADDR))'
37 37
38$(obj)/Image $(obj)/zImage: FORCE 38$(obj)/Image $(obj)/zImage: FORCE
39 @echo 'Kernel configured for XIP (CONFIG_XIP_KERNEL=y)' 39 @echo 'Kernel configured for XIP (CONFIG_XIP_KERNEL=y)'
@@ -48,14 +48,14 @@ $(obj)/xipImage: FORCE
48 48
49$(obj)/Image: vmlinux FORCE 49$(obj)/Image: vmlinux FORCE
50 $(call if_changed,objcopy) 50 $(call if_changed,objcopy)
51 @echo ' Kernel: $@ is ready' 51 $(kecho) ' Kernel: $@ is ready'
52 52
53$(obj)/compressed/vmlinux: $(obj)/Image FORCE 53$(obj)/compressed/vmlinux: $(obj)/Image FORCE
54 $(Q)$(MAKE) $(build)=$(obj)/compressed $@ 54 $(Q)$(MAKE) $(build)=$(obj)/compressed $@
55 55
56$(obj)/zImage: $(obj)/compressed/vmlinux FORCE 56$(obj)/zImage: $(obj)/compressed/vmlinux FORCE
57 $(call if_changed,objcopy) 57 $(call if_changed,objcopy)
58 @echo ' Kernel: $@ is ready' 58 $(kecho) ' Kernel: $@ is ready'
59 59
60endif 60endif
61 61
@@ -90,7 +90,7 @@ fi
90$(obj)/uImage: $(obj)/zImage FORCE 90$(obj)/uImage: $(obj)/zImage FORCE
91 @$(check_for_multiple_loadaddr) 91 @$(check_for_multiple_loadaddr)
92 $(call if_changed,uimage) 92 $(call if_changed,uimage)
93 @echo ' Image $@ is ready' 93 $(kecho) ' Image $@ is ready'
94 94
95$(obj)/bootp/bootp: $(obj)/zImage initrd FORCE 95$(obj)/bootp/bootp: $(obj)/zImage initrd FORCE
96 $(Q)$(MAKE) $(build)=$(obj)/bootp $@ 96 $(Q)$(MAKE) $(build)=$(obj)/bootp $@
@@ -98,7 +98,7 @@ $(obj)/bootp/bootp: $(obj)/zImage initrd FORCE
98 98
99$(obj)/bootpImage: $(obj)/bootp/bootp FORCE 99$(obj)/bootpImage: $(obj)/bootp/bootp FORCE
100 $(call if_changed,objcopy) 100 $(call if_changed,objcopy)
101 @echo ' Kernel: $@ is ready' 101 $(kecho) ' Kernel: $@ is ready'
102 102
103PHONY += initrd FORCE 103PHONY += initrd FORCE
104initrd: 104initrd:
diff --git a/arch/arm/include/asm/flat.h b/arch/arm/include/asm/flat.h
index 59426a4595c9..e847d23351ed 100644
--- a/arch/arm/include/asm/flat.h
+++ b/arch/arm/include/asm/flat.h
@@ -8,7 +8,7 @@
8#define flat_argvp_envp_on_stack() 1 8#define flat_argvp_envp_on_stack() 1
9#define flat_old_ram_flag(flags) (flags) 9#define flat_old_ram_flag(flags) (flags)
10#define flat_reloc_valid(reloc, size) ((reloc) <= (size)) 10#define flat_reloc_valid(reloc, size) ((reloc) <= (size))
11#define flat_get_addr_from_rp(rp, relval, flags, persistent) get_unaligned(rp) 11#define flat_get_addr_from_rp(rp, relval, flags, persistent) ((void)persistent,get_unaligned(rp))
12#define flat_put_addr_at_rp(rp, val, relval) put_unaligned(val,rp) 12#define flat_put_addr_at_rp(rp, val, relval) put_unaligned(val,rp)
13#define flat_get_relocate_addr(rel) (rel) 13#define flat_get_relocate_addr(rel) (rel)
14#define flat_set_persistent(relval, p) 0 14#define flat_set_persistent(relval, p) 0
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 77bd79f2ffdb..7e1f76027f66 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -200,8 +200,8 @@ extern int __put_user_8(void *, unsigned long long);
200#define USER_DS KERNEL_DS 200#define USER_DS KERNEL_DS
201 201
202#define segment_eq(a,b) (1) 202#define segment_eq(a,b) (1)
203#define __addr_ok(addr) (1) 203#define __addr_ok(addr) ((void)(addr),1)
204#define __range_ok(addr,size) (0) 204#define __range_ok(addr,size) ((void)(addr),0)
205#define get_fs() (KERNEL_DS) 205#define get_fs() (KERNEL_DS)
206 206
207static inline void set_fs(mm_segment_t fs) 207static inline void set_fs(mm_segment_t fs)
diff --git a/arch/arm/include/asm/xen/interface.h b/arch/arm/include/asm/xen/interface.h
index ae05e56dd17d..5000397134b4 100644
--- a/arch/arm/include/asm/xen/interface.h
+++ b/arch/arm/include/asm/xen/interface.h
@@ -29,16 +29,22 @@
29 29
30#ifndef __ASSEMBLY__ 30#ifndef __ASSEMBLY__
31/* Explicitly size integers that represent pfns in the interface with 31/* Explicitly size integers that represent pfns in the interface with
32 * Xen so that we can have one ABI that works for 32 and 64 bit guests. */ 32 * Xen so that we can have one ABI that works for 32 and 64 bit guests.
33 * Note that this means that the xen_pfn_t type may be capable of
34 * representing pfn's which the guest cannot represent in its own pfn
35 * type. However since pfn space is controlled by the guest this is
36 * fine since it simply wouldn't be able to create any sure pfns in
37 * the first place.
38 */
33typedef uint64_t xen_pfn_t; 39typedef uint64_t xen_pfn_t;
40#define PRI_xen_pfn "llx"
34typedef uint64_t xen_ulong_t; 41typedef uint64_t xen_ulong_t;
42#define PRI_xen_ulong "llx"
35/* Guest handles for primitive C types. */ 43/* Guest handles for primitive C types. */
36__DEFINE_GUEST_HANDLE(uchar, unsigned char); 44__DEFINE_GUEST_HANDLE(uchar, unsigned char);
37__DEFINE_GUEST_HANDLE(uint, unsigned int); 45__DEFINE_GUEST_HANDLE(uint, unsigned int);
38__DEFINE_GUEST_HANDLE(ulong, unsigned long);
39DEFINE_GUEST_HANDLE(char); 46DEFINE_GUEST_HANDLE(char);
40DEFINE_GUEST_HANDLE(int); 47DEFINE_GUEST_HANDLE(int);
41DEFINE_GUEST_HANDLE(long);
42DEFINE_GUEST_HANDLE(void); 48DEFINE_GUEST_HANDLE(void);
43DEFINE_GUEST_HANDLE(uint64_t); 49DEFINE_GUEST_HANDLE(uint64_t);
44DEFINE_GUEST_HANDLE(uint32_t); 50DEFINE_GUEST_HANDLE(uint32_t);
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 174202318dff..c6b9096cef95 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -10,7 +10,7 @@
10#include <xen/interface/grant_table.h> 10#include <xen/interface/grant_table.h>
11 11
12#define pfn_to_mfn(pfn) (pfn) 12#define pfn_to_mfn(pfn) (pfn)
13#define phys_to_machine_mapping_valid (1) 13#define phys_to_machine_mapping_valid(pfn) (1)
14#define mfn_to_pfn(mfn) (mfn) 14#define mfn_to_pfn(mfn) (mfn)
15#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT)) 15#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
16 16
@@ -30,6 +30,8 @@ typedef struct xpaddr {
30#define XMADDR(x) ((xmaddr_t) { .maddr = (x) }) 30#define XMADDR(x) ((xmaddr_t) { .maddr = (x) })
31#define XPADDR(x) ((xpaddr_t) { .paddr = (x) }) 31#define XPADDR(x) ((xpaddr_t) { .paddr = (x) })
32 32
33#define INVALID_P2M_ENTRY (~0UL)
34
33static inline xmaddr_t phys_to_machine(xpaddr_t phys) 35static inline xmaddr_t phys_to_machine(xpaddr_t phys)
34{ 36{
35 unsigned offset = phys.paddr & ~PAGE_MASK; 37 unsigned offset = phys.paddr & ~PAGE_MASK;
@@ -74,9 +76,14 @@ static inline int m2p_remove_override(struct page *page, bool clear_pte)
74 return 0; 76 return 0;
75} 77}
76 78
79static inline bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
80{
81 BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
82 return true;
83}
84
77static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) 85static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
78{ 86{
79 BUG(); 87 return __set_phys_to_machine(pfn, mfn);
80 return false;
81} 88}
82#endif /* _ASM_ARM_XEN_PAGE_H */ 89#endif /* _ASM_ARM_XEN_PAGE_H */
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 16cedb42c0c3..896165096d6a 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -34,6 +34,7 @@
34#include <linux/list.h> 34#include <linux/list.h>
35#include <linux/kallsyms.h> 35#include <linux/kallsyms.h>
36#include <linux/proc_fs.h> 36#include <linux/proc_fs.h>
37#include <linux/export.h>
37 38
38#include <asm/exception.h> 39#include <asm/exception.h>
39#include <asm/mach/arch.h> 40#include <asm/mach/arch.h>
@@ -109,6 +110,7 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)
109 /* Order is clear bits in "clr" then set bits in "set" */ 110 /* Order is clear bits in "clr" then set bits in "set" */
110 irq_modify_status(irq, clr, set & ~clr); 111 irq_modify_status(irq, clr, set & ~clr);
111} 112}
113EXPORT_SYMBOL_GPL(set_irq_flags);
112 114
113void __init init_IRQ(void) 115void __init init_IRQ(void)
114{ 116{
diff --git a/arch/arm/kernel/kprobes-test-arm.c b/arch/arm/kernel/kprobes-test-arm.c
index 38c1a3b103a0..839312905067 100644
--- a/arch/arm/kernel/kprobes-test-arm.c
+++ b/arch/arm/kernel/kprobes-test-arm.c
@@ -366,7 +366,9 @@ void kprobe_arm_test_cases(void)
366 TEST_UNSUPPORTED(".word 0xe04f0392 @ umaal r0, pc, r2, r3") 366 TEST_UNSUPPORTED(".word 0xe04f0392 @ umaal r0, pc, r2, r3")
367 TEST_UNSUPPORTED(".word 0xe0500090 @ undef") 367 TEST_UNSUPPORTED(".word 0xe0500090 @ undef")
368 TEST_UNSUPPORTED(".word 0xe05fff9f @ undef") 368 TEST_UNSUPPORTED(".word 0xe05fff9f @ undef")
369#endif
369 370
371#if __LINUX_ARM_ARCH__ >= 7
370 TEST_RRR( "mls r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"") 372 TEST_RRR( "mls r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
371 TEST_RRR( "mlshi r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"") 373 TEST_RRR( "mlshi r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
372 TEST_RR( "mls lr, r",1, VAL2,", r",2, VAL3,", r13") 374 TEST_RR( "mls lr, r",1, VAL2,", r",2, VAL3,", r13")
@@ -456,6 +458,8 @@ void kprobe_arm_test_cases(void)
456 TEST_UNSUPPORTED(".word 0xe1700090") /* Unallocated space */ 458 TEST_UNSUPPORTED(".word 0xe1700090") /* Unallocated space */
457#if __LINUX_ARM_ARCH__ >= 6 459#if __LINUX_ARM_ARCH__ >= 6
458 TEST_UNSUPPORTED("ldrex r2, [sp]") 460 TEST_UNSUPPORTED("ldrex r2, [sp]")
461#endif
462#if (__LINUX_ARM_ARCH__ >= 7) || defined(CONFIG_CPU_32v6K)
459 TEST_UNSUPPORTED("strexd r0, r2, r3, [sp]") 463 TEST_UNSUPPORTED("strexd r0, r2, r3, [sp]")
460 TEST_UNSUPPORTED("ldrexd r2, r3, [sp]") 464 TEST_UNSUPPORTED("ldrexd r2, r3, [sp]")
461 TEST_UNSUPPORTED("strexb r0, r2, [sp]") 465 TEST_UNSUPPORTED("strexb r0, r2, [sp]")
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index e29c3337ca81..8ef8c9337809 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -45,10 +45,9 @@ int machine_kexec_prepare(struct kimage *image)
45 for (i = 0; i < image->nr_segments; i++) { 45 for (i = 0; i < image->nr_segments; i++) {
46 current_segment = &image->segment[i]; 46 current_segment = &image->segment[i];
47 47
48 err = memblock_is_region_memory(current_segment->mem, 48 if (!memblock_is_region_memory(current_segment->mem,
49 current_segment->memsz); 49 current_segment->memsz))
50 if (err) 50 return -EINVAL;
51 return - EINVAL;
52 51
53 err = get_user(header, (__be32*)current_segment->buf); 52 err = get_user(header, (__be32*)current_segment->buf);
54 if (err) 53 if (err)
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 93971b1a4f0b..53c0304b734a 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -96,6 +96,10 @@ armpmu_event_set_period(struct perf_event *event,
96 s64 period = hwc->sample_period; 96 s64 period = hwc->sample_period;
97 int ret = 0; 97 int ret = 0;
98 98
99 /* The period may have been changed by PERF_EVENT_IOC_PERIOD */
100 if (unlikely(period != hwc->last_period))
101 left = period - (hwc->last_period - left);
102
99 if (unlikely(left <= -period)) { 103 if (unlikely(left <= -period)) {
100 left = period; 104 left = period;
101 local64_set(&hwc->period_left, left); 105 local64_set(&hwc->period_left, left);
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 8e20754dd31d..fbc8b2623d82 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -294,18 +294,24 @@ static void percpu_timer_setup(void);
294asmlinkage void __cpuinit secondary_start_kernel(void) 294asmlinkage void __cpuinit secondary_start_kernel(void)
295{ 295{
296 struct mm_struct *mm = &init_mm; 296 struct mm_struct *mm = &init_mm;
297 unsigned int cpu = smp_processor_id(); 297 unsigned int cpu;
298
299 /*
300 * The identity mapping is uncached (strongly ordered), so
301 * switch away from it before attempting any exclusive accesses.
302 */
303 cpu_switch_mm(mm->pgd, mm);
304 enter_lazy_tlb(mm, current);
305 local_flush_tlb_all();
298 306
299 /* 307 /*
300 * All kernel threads share the same mm context; grab a 308 * All kernel threads share the same mm context; grab a
301 * reference and switch to it. 309 * reference and switch to it.
302 */ 310 */
311 cpu = smp_processor_id();
303 atomic_inc(&mm->mm_count); 312 atomic_inc(&mm->mm_count);
304 current->active_mm = mm; 313 current->active_mm = mm;
305 cpumask_set_cpu(cpu, mm_cpumask(mm)); 314 cpumask_set_cpu(cpu, mm_cpumask(mm));
306 cpu_switch_mm(mm->pgd, mm);
307 enter_lazy_tlb(mm, current);
308 local_flush_tlb_all();
309 315
310 printk("CPU%u: Booted secondary processor\n", cpu); 316 printk("CPU%u: Booted secondary processor\n", cpu);
311 317
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index e1f906989bb8..b22d700fea27 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -42,10 +42,10 @@ static void twd_set_mode(enum clock_event_mode mode,
42 42
43 switch (mode) { 43 switch (mode) {
44 case CLOCK_EVT_MODE_PERIODIC: 44 case CLOCK_EVT_MODE_PERIODIC:
45 /* timer load already set up */
46 ctrl = TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_IT_ENABLE 45 ctrl = TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_IT_ENABLE
47 | TWD_TIMER_CONTROL_PERIODIC; 46 | TWD_TIMER_CONTROL_PERIODIC;
48 __raw_writel(twd_timer_rate / HZ, twd_base + TWD_TIMER_LOAD); 47 __raw_writel(DIV_ROUND_CLOSEST(twd_timer_rate, HZ),
48 twd_base + TWD_TIMER_LOAD);
49 break; 49 break;
50 case CLOCK_EVT_MODE_ONESHOT: 50 case CLOCK_EVT_MODE_ONESHOT:
51 /* period set, and timer enabled in 'next_event' hook */ 51 /* period set, and timer enabled in 'next_event' hook */
diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
index 9d0a30032d7f..0dc53854a5d8 100644
--- a/arch/arm/lib/delay.c
+++ b/arch/arm/lib/delay.c
@@ -45,6 +45,7 @@ int read_current_timer(unsigned long *timer_val)
45 *timer_val = delay_timer->read_current_timer(); 45 *timer_val = delay_timer->read_current_timer();
46 return 0; 46 return 0;
47} 47}
48EXPORT_SYMBOL_GPL(read_current_timer);
48 49
49static void __timer_delay(unsigned long cycles) 50static void __timer_delay(unsigned long cycles)
50{ 51{
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index b9f60ebe3bc4..023f443784ec 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -856,8 +856,10 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
856 if (thumb2_32b) { 856 if (thumb2_32b) {
857 offset.un = 0; 857 offset.un = 0;
858 handler = do_alignment_t32_to_handler(&instr, regs, &offset); 858 handler = do_alignment_t32_to_handler(&instr, regs, &offset);
859 } else 859 } else {
860 offset.un = 0;
860 handler = do_alignment_ldmstm; 861 handler = do_alignment_ldmstm;
862 }
861 break; 863 break;
862 864
863 default: 865 default:
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 477a2d23ddf1..58bc3e4d3bd0 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -610,7 +610,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
610 gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller) 610 gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller)
611{ 611{
612 u64 mask = get_coherent_dma_mask(dev); 612 u64 mask = get_coherent_dma_mask(dev);
613 struct page *page; 613 struct page *page = NULL;
614 void *addr; 614 void *addr;
615 615
616#ifdef CONFIG_DMA_API_DEBUG 616#ifdef CONFIG_DMA_API_DEBUG
diff --git a/arch/arm/mm/vmregion.h b/arch/arm/mm/vmregion.h
index bf312c354a21..0f5a5f2a2c7b 100644
--- a/arch/arm/mm/vmregion.h
+++ b/arch/arm/mm/vmregion.h
@@ -17,7 +17,6 @@ struct arm_vmregion {
17 struct list_head vm_list; 17 struct list_head vm_list;
18 unsigned long vm_start; 18 unsigned long vm_start;
19 unsigned long vm_end; 19 unsigned long vm_end;
20 void *priv;
21 int vm_active; 20 int vm_active;
22 const void *caller; 21 const void *caller;
23}; 22};
diff --git a/arch/arm/tools/Makefile b/arch/arm/tools/Makefile
index 635cb1865e4d..cd60a81163e9 100644
--- a/arch/arm/tools/Makefile
+++ b/arch/arm/tools/Makefile
@@ -5,6 +5,6 @@
5# 5#
6 6
7include/generated/mach-types.h: $(src)/gen-mach-types $(src)/mach-types 7include/generated/mach-types.h: $(src)/gen-mach-types $(src)/mach-types
8 @echo ' Generating $@' 8 $(kecho) ' Generating $@'
9 @mkdir -p $(dir $@) 9 @mkdir -p $(dir $@)
10 $(Q)$(AWK) -f $^ > $@ || { rm -f $@; /bin/false; } 10 $(Q)$(AWK) -f $^ > $@ || { rm -f $@; /bin/false; }
diff --git a/arch/arm/xen/grant-table.c b/arch/arm/xen/grant-table.c
index dbd1330c0196..859a9bb002d5 100644
--- a/arch/arm/xen/grant-table.c
+++ b/arch/arm/xen/grant-table.c
@@ -33,7 +33,7 @@
33#include <xen/page.h> 33#include <xen/page.h>
34#include <xen/grant_table.h> 34#include <xen/grant_table.h>
35 35
36int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes, 36int arch_gnttab_map_shared(xen_pfn_t *frames, unsigned long nr_gframes,
37 unsigned long max_nr_gframes, 37 unsigned long max_nr_gframes,
38 void **__shared) 38 void **__shared)
39{ 39{
diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild
index 4a159da23633..dd02f09b6eda 100644
--- a/arch/ia64/include/asm/Kbuild
+++ b/arch/ia64/include/asm/Kbuild
@@ -1,3 +1,4 @@
1 1
2generic-y += clkdev.h 2generic-y += clkdev.h
3generic-y += exec.h 3generic-y += exec.h
4generic-y += kvm_para.h
diff --git a/arch/ia64/include/asm/kvm_para.h b/arch/ia64/include/asm/kvm_para.h
deleted file mode 100644
index 47c00f910434..000000000000
--- a/arch/ia64/include/asm/kvm_para.h
+++ /dev/null
@@ -1,34 +0,0 @@
1/*
2 * Copyright (C) 2007 Xiantao Zhang <xiantao.zhang@intel.com>
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 */
18#ifndef __IA64_KVM_PARA_H
19#define __IA64_KVM_PARA_H
20
21#include <uapi/asm/kvm_para.h>
22
23
24static inline unsigned int kvm_arch_para_features(void)
25{
26 return 0;
27}
28
29static inline bool kvm_check_and_clear_guest_paused(void)
30{
31 return false;
32}
33
34#endif
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index ecb540810ab3..88fa3ac86fae 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -1,5 +1,3 @@
1include include/asm-generic/Kbuild.asm
2header-y += cachectl.h
3 1
4generic-y += bitsperlong.h 2generic-y += bitsperlong.h
5generic-y += clkdev.h 3generic-y += clkdev.h
diff --git a/arch/m68k/include/asm/ptrace.h b/arch/m68k/include/asm/ptrace.h
index 5e08b597f012..0f717045bdde 100644
--- a/arch/m68k/include/asm/ptrace.h
+++ b/arch/m68k/include/asm/ptrace.h
@@ -1,82 +1,10 @@
1#ifndef _M68K_PTRACE_H 1#ifndef _M68K_PTRACE_H
2#define _M68K_PTRACE_H 2#define _M68K_PTRACE_H
3 3
4#define PT_D1 0 4#include <uapi/asm/ptrace.h>
5#define PT_D2 1
6#define PT_D3 2
7#define PT_D4 3
8#define PT_D5 4
9#define PT_D6 5
10#define PT_D7 6
11#define PT_A0 7
12#define PT_A1 8
13#define PT_A2 9
14#define PT_A3 10
15#define PT_A4 11
16#define PT_A5 12
17#define PT_A6 13
18#define PT_D0 14
19#define PT_USP 15
20#define PT_ORIG_D0 16
21#define PT_SR 17
22#define PT_PC 18
23 5
24#ifndef __ASSEMBLY__ 6#ifndef __ASSEMBLY__
25 7
26/* this struct defines the way the registers are stored on the
27 stack during a system call. */
28
29struct pt_regs {
30 long d1;
31 long d2;
32 long d3;
33 long d4;
34 long d5;
35 long a0;
36 long a1;
37 long a2;
38 long d0;
39 long orig_d0;
40 long stkadj;
41#ifdef CONFIG_COLDFIRE
42 unsigned format : 4; /* frame format specifier */
43 unsigned vector : 12; /* vector offset */
44 unsigned short sr;
45 unsigned long pc;
46#else
47 unsigned short sr;
48 unsigned long pc;
49 unsigned format : 4; /* frame format specifier */
50 unsigned vector : 12; /* vector offset */
51#endif
52};
53
54/*
55 * This is the extended stack used by signal handlers and the context
56 * switcher: it's pushed after the normal "struct pt_regs".
57 */
58struct switch_stack {
59 unsigned long d6;
60 unsigned long d7;
61 unsigned long a3;
62 unsigned long a4;
63 unsigned long a5;
64 unsigned long a6;
65 unsigned long retpc;
66};
67
68/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
69#define PTRACE_GETREGS 12
70#define PTRACE_SETREGS 13
71#define PTRACE_GETFPREGS 14
72#define PTRACE_SETFPREGS 15
73
74#define PTRACE_GET_THREAD_AREA 25
75
76#define PTRACE_SINGLEBLOCK 33 /* resume execution until next branch */
77
78#ifdef __KERNEL__
79
80#ifndef PS_S 8#ifndef PS_S
81#define PS_S (0x2000) 9#define PS_S (0x2000)
82#define PS_M (0x1000) 10#define PS_M (0x1000)
@@ -94,6 +22,5 @@ struct switch_stack {
94#define arch_has_block_step() (1) 22#define arch_has_block_step() (1)
95#endif 23#endif
96 24
97#endif /* __KERNEL__ */
98#endif /* __ASSEMBLY__ */ 25#endif /* __ASSEMBLY__ */
99#endif /* _M68K_PTRACE_H */ 26#endif /* _M68K_PTRACE_H */
diff --git a/arch/m68k/include/asm/setup.h b/arch/m68k/include/asm/setup.h
index 00c2c5397d37..65e78a2dad64 100644
--- a/arch/m68k/include/asm/setup.h
+++ b/arch/m68k/include/asm/setup.h
@@ -19,33 +19,12 @@
19** Redesign of the boot information structure; moved boot information 19** Redesign of the boot information structure; moved boot information
20** structure to bootinfo.h 20** structure to bootinfo.h
21*/ 21*/
22
23#ifndef _M68K_SETUP_H 22#ifndef _M68K_SETUP_H
24#define _M68K_SETUP_H 23#define _M68K_SETUP_H
25 24
25#include <uapi/asm/setup.h>
26 26
27 27
28 /*
29 * Linux/m68k Architectures
30 */
31
32#define MACH_AMIGA 1
33#define MACH_ATARI 2
34#define MACH_MAC 3
35#define MACH_APOLLO 4
36#define MACH_SUN3 5
37#define MACH_MVME147 6
38#define MACH_MVME16x 7
39#define MACH_BVME6000 8
40#define MACH_HP300 9
41#define MACH_Q40 10
42#define MACH_SUN3X 11
43#define MACH_M54XX 12
44
45#define COMMAND_LINE_SIZE 256
46
47#ifdef __KERNEL__
48
49#define CL_SIZE COMMAND_LINE_SIZE 28#define CL_SIZE COMMAND_LINE_SIZE
50 29
51#ifndef __ASSEMBLY__ 30#ifndef __ASSEMBLY__
@@ -194,63 +173,6 @@ extern unsigned long m68k_machtype;
194# define MACH_TYPE (m68k_machtype) 173# define MACH_TYPE (m68k_machtype)
195#endif 174#endif
196 175
197#endif /* __KERNEL__ */
198
199
200 /*
201 * CPU, FPU and MMU types
202 *
203 * Note: we may rely on the following equalities:
204 *
205 * CPU_68020 == MMU_68851
206 * CPU_68030 == MMU_68030
207 * CPU_68040 == FPU_68040 == MMU_68040
208 * CPU_68060 == FPU_68060 == MMU_68060
209 */
210
211#define CPUB_68020 0
212#define CPUB_68030 1
213#define CPUB_68040 2
214#define CPUB_68060 3
215#define CPUB_COLDFIRE 4
216
217#define CPU_68020 (1<<CPUB_68020)
218#define CPU_68030 (1<<CPUB_68030)
219#define CPU_68040 (1<<CPUB_68040)
220#define CPU_68060 (1<<CPUB_68060)
221#define CPU_COLDFIRE (1<<CPUB_COLDFIRE)
222
223#define FPUB_68881 0
224#define FPUB_68882 1
225#define FPUB_68040 2 /* Internal FPU */
226#define FPUB_68060 3 /* Internal FPU */
227#define FPUB_SUNFPA 4 /* Sun-3 FPA */
228#define FPUB_COLDFIRE 5 /* ColdFire FPU */
229
230#define FPU_68881 (1<<FPUB_68881)
231#define FPU_68882 (1<<FPUB_68882)
232#define FPU_68040 (1<<FPUB_68040)
233#define FPU_68060 (1<<FPUB_68060)
234#define FPU_SUNFPA (1<<FPUB_SUNFPA)
235#define FPU_COLDFIRE (1<<FPUB_COLDFIRE)
236
237#define MMUB_68851 0
238#define MMUB_68030 1 /* Internal MMU */
239#define MMUB_68040 2 /* Internal MMU */
240#define MMUB_68060 3 /* Internal MMU */
241#define MMUB_APOLLO 4 /* Custom Apollo */
242#define MMUB_SUN3 5 /* Custom Sun-3 */
243#define MMUB_COLDFIRE 6 /* Internal MMU */
244
245#define MMU_68851 (1<<MMUB_68851)
246#define MMU_68030 (1<<MMUB_68030)
247#define MMU_68040 (1<<MMUB_68040)
248#define MMU_68060 (1<<MMUB_68060)
249#define MMU_SUN3 (1<<MMUB_SUN3)
250#define MMU_APOLLO (1<<MMUB_APOLLO)
251#define MMU_COLDFIRE (1<<MMUB_COLDFIRE)
252
253#ifdef __KERNEL__
254 176
255#ifndef __ASSEMBLY__ 177#ifndef __ASSEMBLY__
256extern unsigned long m68k_cputype; 178extern unsigned long m68k_cputype;
@@ -385,6 +307,4 @@ extern int m68k_realnum_memory; /* real # of memory blocks found */
385extern struct mem_info m68k_memory[NUM_MEMINFO];/* memory description */ 307extern struct mem_info m68k_memory[NUM_MEMINFO];/* memory description */
386#endif 308#endif
387 309
388#endif /* __KERNEL__ */
389
390#endif /* _M68K_SETUP_H */ 310#endif /* _M68K_SETUP_H */
diff --git a/arch/m68k/include/asm/signal.h b/arch/m68k/include/asm/signal.h
index 60e88660169c..67e489d8d1bd 100644
--- a/arch/m68k/include/asm/signal.h
+++ b/arch/m68k/include/asm/signal.h
@@ -1,12 +1,8 @@
1#ifndef _M68K_SIGNAL_H 1#ifndef _M68K_SIGNAL_H
2#define _M68K_SIGNAL_H 2#define _M68K_SIGNAL_H
3 3
4#include <linux/types.h> 4#include <uapi/asm/signal.h>
5 5
6/* Avoid too many header ordering problems. */
7struct siginfo;
8
9#ifdef __KERNEL__
10/* Most things should be clean enough to redefine this at will, if care 6/* Most things should be clean enough to redefine this at will, if care
11 is taken to make libc match. */ 7 is taken to make libc match. */
12 8
@@ -20,92 +16,6 @@ typedef struct {
20 unsigned long sig[_NSIG_WORDS]; 16 unsigned long sig[_NSIG_WORDS];
21} sigset_t; 17} sigset_t;
22 18
23#else
24/* Here we must cater to libcs that poke about in kernel headers. */
25
26#define NSIG 32
27typedef unsigned long sigset_t;
28
29#endif /* __KERNEL__ */
30
31#define SIGHUP 1
32#define SIGINT 2
33#define SIGQUIT 3
34#define SIGILL 4
35#define SIGTRAP 5
36#define SIGABRT 6
37#define SIGIOT 6
38#define SIGBUS 7
39#define SIGFPE 8
40#define SIGKILL 9
41#define SIGUSR1 10
42#define SIGSEGV 11
43#define SIGUSR2 12
44#define SIGPIPE 13
45#define SIGALRM 14
46#define SIGTERM 15
47#define SIGSTKFLT 16
48#define SIGCHLD 17
49#define SIGCONT 18
50#define SIGSTOP 19
51#define SIGTSTP 20
52#define SIGTTIN 21
53#define SIGTTOU 22
54#define SIGURG 23
55#define SIGXCPU 24
56#define SIGXFSZ 25
57#define SIGVTALRM 26
58#define SIGPROF 27
59#define SIGWINCH 28
60#define SIGIO 29
61#define SIGPOLL SIGIO
62/*
63#define SIGLOST 29
64*/
65#define SIGPWR 30
66#define SIGSYS 31
67#define SIGUNUSED 31
68
69/* These should not be considered constants from userland. */
70#define SIGRTMIN 32
71#define SIGRTMAX _NSIG
72
73/*
74 * SA_FLAGS values:
75 *
76 * SA_ONSTACK indicates that a registered stack_t will be used.
77 * SA_RESTART flag to get restarting signals (which were the default long ago)
78 * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
79 * SA_RESETHAND clears the handler when the signal is delivered.
80 * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
81 * SA_NODEFER prevents the current signal from being masked in the handler.
82 *
83 * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
84 * Unix names RESETHAND and NODEFER respectively.
85 */
86#define SA_NOCLDSTOP 0x00000001
87#define SA_NOCLDWAIT 0x00000002
88#define SA_SIGINFO 0x00000004
89#define SA_ONSTACK 0x08000000
90#define SA_RESTART 0x10000000
91#define SA_NODEFER 0x40000000
92#define SA_RESETHAND 0x80000000
93
94#define SA_NOMASK SA_NODEFER
95#define SA_ONESHOT SA_RESETHAND
96
97/*
98 * sigaltstack controls
99 */
100#define SS_ONSTACK 1
101#define SS_DISABLE 2
102
103#define MINSIGSTKSZ 2048
104#define SIGSTKSZ 8192
105
106#include <asm-generic/signal-defs.h>
107
108#ifdef __KERNEL__
109struct old_sigaction { 19struct old_sigaction {
110 __sighandler_t sa_handler; 20 __sighandler_t sa_handler;
111 old_sigset_t sa_mask; 21 old_sigset_t sa_mask;
@@ -123,31 +33,6 @@ struct sigaction {
123struct k_sigaction { 33struct k_sigaction {
124 struct sigaction sa; 34 struct sigaction sa;
125}; 35};
126#else
127/* Here we must cater to libcs that poke about in kernel headers. */
128
129struct sigaction {
130 union {
131 __sighandler_t _sa_handler;
132 void (*_sa_sigaction)(int, struct siginfo *, void *);
133 } _u;
134 sigset_t sa_mask;
135 unsigned long sa_flags;
136 void (*sa_restorer)(void);
137};
138
139#define sa_handler _u._sa_handler
140#define sa_sigaction _u._sa_sigaction
141
142#endif /* __KERNEL__ */
143
144typedef struct sigaltstack {
145 void __user *ss_sp;
146 int ss_flags;
147 size_t ss_size;
148} stack_t;
149
150#ifdef __KERNEL__
151#include <asm/sigcontext.h> 36#include <asm/sigcontext.h>
152 37
153#ifndef CONFIG_CPU_HAS_NO_BITFIELDS 38#ifndef CONFIG_CPU_HAS_NO_BITFIELDS
@@ -208,5 +93,4 @@ struct pt_regs;
208extern void ptrace_signal_deliver(struct pt_regs *regs, void *cookie); 93extern void ptrace_signal_deliver(struct pt_regs *regs, void *cookie);
209#endif /* __uClinux__ */ 94#endif /* __uClinux__ */
210 95
211#endif /* __KERNEL__ */
212#endif /* _M68K_SIGNAL_H */ 96#endif /* _M68K_SIGNAL_H */
diff --git a/arch/m68k/include/asm/termios.h b/arch/m68k/include/asm/termios.h
index 0823032e4045..ad8efb098663 100644
--- a/arch/m68k/include/asm/termios.h
+++ b/arch/m68k/include/asm/termios.h
@@ -1,27 +1,8 @@
1#ifndef _M68K_TERMIOS_H 1#ifndef _M68K_TERMIOS_H
2#define _M68K_TERMIOS_H 2#define _M68K_TERMIOS_H
3 3
4#include <asm/termbits.h> 4#include <uapi/asm/termios.h>
5#include <asm/ioctls.h>
6 5
7struct winsize {
8 unsigned short ws_row;
9 unsigned short ws_col;
10 unsigned short ws_xpixel;
11 unsigned short ws_ypixel;
12};
13
14#define NCC 8
15struct termio {
16 unsigned short c_iflag; /* input mode flags */
17 unsigned short c_oflag; /* output mode flags */
18 unsigned short c_cflag; /* control mode flags */
19 unsigned short c_lflag; /* local mode flags */
20 unsigned char c_line; /* line discipline */
21 unsigned char c_cc[NCC]; /* control characters */
22};
23
24#ifdef __KERNEL__
25/* intr=^C quit=^| erase=del kill=^U 6/* intr=^C quit=^| erase=del kill=^U
26 eof=^D vtime=\0 vmin=\1 sxtc=\0 7 eof=^D vtime=\0 vmin=\1 sxtc=\0
27 start=^Q stop=^S susp=^Z eol=\0 8 start=^Q stop=^S susp=^Z eol=\0
@@ -29,27 +10,6 @@ struct termio {
29 eol2=\0 10 eol2=\0
30*/ 11*/
31#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0" 12#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
32#endif
33
34/* modem lines */
35#define TIOCM_LE 0x001
36#define TIOCM_DTR 0x002
37#define TIOCM_RTS 0x004
38#define TIOCM_ST 0x008
39#define TIOCM_SR 0x010
40#define TIOCM_CTS 0x020
41#define TIOCM_CAR 0x040
42#define TIOCM_RNG 0x080
43#define TIOCM_DSR 0x100
44#define TIOCM_CD TIOCM_CAR
45#define TIOCM_RI TIOCM_RNG
46#define TIOCM_OUT1 0x2000
47#define TIOCM_OUT2 0x4000
48#define TIOCM_LOOP 0x8000
49
50/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
51
52#ifdef __KERNEL__
53 13
54/* 14/*
55 * Translate a "termio" structure into a "termios". Ugh. 15 * Translate a "termio" structure into a "termios". Ugh.
@@ -87,6 +47,4 @@ struct termio {
87#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios)) 47#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
88#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios)) 48#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
89 49
90#endif /* __KERNEL__ */
91
92#endif /* _M68K_TERMIOS_H */ 50#endif /* _M68K_TERMIOS_H */
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index c702ad716791..5fc7f7bec1c8 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -1,361 +1,10 @@
1#ifndef _ASM_M68K_UNISTD_H_ 1#ifndef _ASM_M68K_UNISTD_H_
2#define _ASM_M68K_UNISTD_H_ 2#define _ASM_M68K_UNISTD_H_
3 3
4/* 4#include <uapi/asm/unistd.h>
5 * This file contains the system call numbers.
6 */
7
8#define __NR_restart_syscall 0
9#define __NR_exit 1
10#define __NR_fork 2
11#define __NR_read 3
12#define __NR_write 4
13#define __NR_open 5
14#define __NR_close 6
15#define __NR_waitpid 7
16#define __NR_creat 8
17#define __NR_link 9
18#define __NR_unlink 10
19#define __NR_execve 11
20#define __NR_chdir 12
21#define __NR_time 13
22#define __NR_mknod 14
23#define __NR_chmod 15
24#define __NR_chown 16
25/*#define __NR_break 17*/
26#define __NR_oldstat 18
27#define __NR_lseek 19
28#define __NR_getpid 20
29#define __NR_mount 21
30#define __NR_umount 22
31#define __NR_setuid 23
32#define __NR_getuid 24
33#define __NR_stime 25
34#define __NR_ptrace 26
35#define __NR_alarm 27
36#define __NR_oldfstat 28
37#define __NR_pause 29
38#define __NR_utime 30
39/*#define __NR_stty 31*/
40/*#define __NR_gtty 32*/
41#define __NR_access 33
42#define __NR_nice 34
43/*#define __NR_ftime 35*/
44#define __NR_sync 36
45#define __NR_kill 37
46#define __NR_rename 38
47#define __NR_mkdir 39
48#define __NR_rmdir 40
49#define __NR_dup 41
50#define __NR_pipe 42
51#define __NR_times 43
52/*#define __NR_prof 44*/
53#define __NR_brk 45
54#define __NR_setgid 46
55#define __NR_getgid 47
56#define __NR_signal 48
57#define __NR_geteuid 49
58#define __NR_getegid 50
59#define __NR_acct 51
60#define __NR_umount2 52
61/*#define __NR_lock 53*/
62#define __NR_ioctl 54
63#define __NR_fcntl 55
64/*#define __NR_mpx 56*/
65#define __NR_setpgid 57
66/*#define __NR_ulimit 58*/
67/*#define __NR_oldolduname 59*/
68#define __NR_umask 60
69#define __NR_chroot 61
70#define __NR_ustat 62
71#define __NR_dup2 63
72#define __NR_getppid 64
73#define __NR_getpgrp 65
74#define __NR_setsid 66
75#define __NR_sigaction 67
76#define __NR_sgetmask 68
77#define __NR_ssetmask 69
78#define __NR_setreuid 70
79#define __NR_setregid 71
80#define __NR_sigsuspend 72
81#define __NR_sigpending 73
82#define __NR_sethostname 74
83#define __NR_setrlimit 75
84#define __NR_getrlimit 76
85#define __NR_getrusage 77
86#define __NR_gettimeofday 78
87#define __NR_settimeofday 79
88#define __NR_getgroups 80
89#define __NR_setgroups 81
90#define __NR_select 82
91#define __NR_symlink 83
92#define __NR_oldlstat 84
93#define __NR_readlink 85
94#define __NR_uselib 86
95#define __NR_swapon 87
96#define __NR_reboot 88
97#define __NR_readdir 89
98#define __NR_mmap 90
99#define __NR_munmap 91
100#define __NR_truncate 92
101#define __NR_ftruncate 93
102#define __NR_fchmod 94
103#define __NR_fchown 95
104#define __NR_getpriority 96
105#define __NR_setpriority 97
106/*#define __NR_profil 98*/
107#define __NR_statfs 99
108#define __NR_fstatfs 100
109/*#define __NR_ioperm 101*/
110#define __NR_socketcall 102
111#define __NR_syslog 103
112#define __NR_setitimer 104
113#define __NR_getitimer 105
114#define __NR_stat 106
115#define __NR_lstat 107
116#define __NR_fstat 108
117/*#define __NR_olduname 109*/
118/*#define __NR_iopl 110*/ /* not supported */
119#define __NR_vhangup 111
120/*#define __NR_idle 112*/ /* Obsolete */
121/*#define __NR_vm86 113*/ /* not supported */
122#define __NR_wait4 114
123#define __NR_swapoff 115
124#define __NR_sysinfo 116
125#define __NR_ipc 117
126#define __NR_fsync 118
127#define __NR_sigreturn 119
128#define __NR_clone 120
129#define __NR_setdomainname 121
130#define __NR_uname 122
131#define __NR_cacheflush 123
132#define __NR_adjtimex 124
133#define __NR_mprotect 125
134#define __NR_sigprocmask 126
135#define __NR_create_module 127
136#define __NR_init_module 128
137#define __NR_delete_module 129
138#define __NR_get_kernel_syms 130
139#define __NR_quotactl 131
140#define __NR_getpgid 132
141#define __NR_fchdir 133
142#define __NR_bdflush 134
143#define __NR_sysfs 135
144#define __NR_personality 136
145/*#define __NR_afs_syscall 137*/ /* Syscall for Andrew File System */
146#define __NR_setfsuid 138
147#define __NR_setfsgid 139
148#define __NR__llseek 140
149#define __NR_getdents 141
150#define __NR__newselect 142
151#define __NR_flock 143
152#define __NR_msync 144
153#define __NR_readv 145
154#define __NR_writev 146
155#define __NR_getsid 147
156#define __NR_fdatasync 148
157#define __NR__sysctl 149
158#define __NR_mlock 150
159#define __NR_munlock 151
160#define __NR_mlockall 152
161#define __NR_munlockall 153
162#define __NR_sched_setparam 154
163#define __NR_sched_getparam 155
164#define __NR_sched_setscheduler 156
165#define __NR_sched_getscheduler 157
166#define __NR_sched_yield 158
167#define __NR_sched_get_priority_max 159
168#define __NR_sched_get_priority_min 160
169#define __NR_sched_rr_get_interval 161
170#define __NR_nanosleep 162
171#define __NR_mremap 163
172#define __NR_setresuid 164
173#define __NR_getresuid 165
174#define __NR_getpagesize 166
175#define __NR_query_module 167
176#define __NR_poll 168
177#define __NR_nfsservctl 169
178#define __NR_setresgid 170
179#define __NR_getresgid 171
180#define __NR_prctl 172
181#define __NR_rt_sigreturn 173
182#define __NR_rt_sigaction 174
183#define __NR_rt_sigprocmask 175
184#define __NR_rt_sigpending 176
185#define __NR_rt_sigtimedwait 177
186#define __NR_rt_sigqueueinfo 178
187#define __NR_rt_sigsuspend 179
188#define __NR_pread64 180
189#define __NR_pwrite64 181
190#define __NR_lchown 182
191#define __NR_getcwd 183
192#define __NR_capget 184
193#define __NR_capset 185
194#define __NR_sigaltstack 186
195#define __NR_sendfile 187
196#define __NR_getpmsg 188 /* some people actually want streams */
197#define __NR_putpmsg 189 /* some people actually want streams */
198#define __NR_vfork 190
199#define __NR_ugetrlimit 191
200#define __NR_mmap2 192
201#define __NR_truncate64 193
202#define __NR_ftruncate64 194
203#define __NR_stat64 195
204#define __NR_lstat64 196
205#define __NR_fstat64 197
206#define __NR_chown32 198
207#define __NR_getuid32 199
208#define __NR_getgid32 200
209#define __NR_geteuid32 201
210#define __NR_getegid32 202
211#define __NR_setreuid32 203
212#define __NR_setregid32 204
213#define __NR_getgroups32 205
214#define __NR_setgroups32 206
215#define __NR_fchown32 207
216#define __NR_setresuid32 208
217#define __NR_getresuid32 209
218#define __NR_setresgid32 210
219#define __NR_getresgid32 211
220#define __NR_lchown32 212
221#define __NR_setuid32 213
222#define __NR_setgid32 214
223#define __NR_setfsuid32 215
224#define __NR_setfsgid32 216
225#define __NR_pivot_root 217
226/* 218*/
227/* 219*/
228#define __NR_getdents64 220
229#define __NR_gettid 221
230#define __NR_tkill 222
231#define __NR_setxattr 223
232#define __NR_lsetxattr 224
233#define __NR_fsetxattr 225
234#define __NR_getxattr 226
235#define __NR_lgetxattr 227
236#define __NR_fgetxattr 228
237#define __NR_listxattr 229
238#define __NR_llistxattr 230
239#define __NR_flistxattr 231
240#define __NR_removexattr 232
241#define __NR_lremovexattr 233
242#define __NR_fremovexattr 234
243#define __NR_futex 235
244#define __NR_sendfile64 236
245#define __NR_mincore 237
246#define __NR_madvise 238
247#define __NR_fcntl64 239
248#define __NR_readahead 240
249#define __NR_io_setup 241
250#define __NR_io_destroy 242
251#define __NR_io_getevents 243
252#define __NR_io_submit 244
253#define __NR_io_cancel 245
254#define __NR_fadvise64 246
255#define __NR_exit_group 247
256#define __NR_lookup_dcookie 248
257#define __NR_epoll_create 249
258#define __NR_epoll_ctl 250
259#define __NR_epoll_wait 251
260#define __NR_remap_file_pages 252
261#define __NR_set_tid_address 253
262#define __NR_timer_create 254
263#define __NR_timer_settime 255
264#define __NR_timer_gettime 256
265#define __NR_timer_getoverrun 257
266#define __NR_timer_delete 258
267#define __NR_clock_settime 259
268#define __NR_clock_gettime 260
269#define __NR_clock_getres 261
270#define __NR_clock_nanosleep 262
271#define __NR_statfs64 263
272#define __NR_fstatfs64 264
273#define __NR_tgkill 265
274#define __NR_utimes 266
275#define __NR_fadvise64_64 267
276#define __NR_mbind 268
277#define __NR_get_mempolicy 269
278#define __NR_set_mempolicy 270
279#define __NR_mq_open 271
280#define __NR_mq_unlink 272
281#define __NR_mq_timedsend 273
282#define __NR_mq_timedreceive 274
283#define __NR_mq_notify 275
284#define __NR_mq_getsetattr 276
285#define __NR_waitid 277
286/*#define __NR_vserver 278*/
287#define __NR_add_key 279
288#define __NR_request_key 280
289#define __NR_keyctl 281
290#define __NR_ioprio_set 282
291#define __NR_ioprio_get 283
292#define __NR_inotify_init 284
293#define __NR_inotify_add_watch 285
294#define __NR_inotify_rm_watch 286
295#define __NR_migrate_pages 287
296#define __NR_openat 288
297#define __NR_mkdirat 289
298#define __NR_mknodat 290
299#define __NR_fchownat 291
300#define __NR_futimesat 292
301#define __NR_fstatat64 293
302#define __NR_unlinkat 294
303#define __NR_renameat 295
304#define __NR_linkat 296
305#define __NR_symlinkat 297
306#define __NR_readlinkat 298
307#define __NR_fchmodat 299
308#define __NR_faccessat 300
309#define __NR_pselect6 301
310#define __NR_ppoll 302
311#define __NR_unshare 303
312#define __NR_set_robust_list 304
313#define __NR_get_robust_list 305
314#define __NR_splice 306
315#define __NR_sync_file_range 307
316#define __NR_tee 308
317#define __NR_vmsplice 309
318#define __NR_move_pages 310
319#define __NR_sched_setaffinity 311
320#define __NR_sched_getaffinity 312
321#define __NR_kexec_load 313
322#define __NR_getcpu 314
323#define __NR_epoll_pwait 315
324#define __NR_utimensat 316
325#define __NR_signalfd 317
326#define __NR_timerfd_create 318
327#define __NR_eventfd 319
328#define __NR_fallocate 320
329#define __NR_timerfd_settime 321
330#define __NR_timerfd_gettime 322
331#define __NR_signalfd4 323
332#define __NR_eventfd2 324
333#define __NR_epoll_create1 325
334#define __NR_dup3 326
335#define __NR_pipe2 327
336#define __NR_inotify_init1 328
337#define __NR_preadv 329
338#define __NR_pwritev 330
339#define __NR_rt_tgsigqueueinfo 331
340#define __NR_perf_event_open 332
341#define __NR_get_thread_area 333
342#define __NR_set_thread_area 334
343#define __NR_atomic_cmpxchg_32 335
344#define __NR_atomic_barrier 336
345#define __NR_fanotify_init 337
346#define __NR_fanotify_mark 338
347#define __NR_prlimit64 339
348#define __NR_name_to_handle_at 340
349#define __NR_open_by_handle_at 341
350#define __NR_clock_adjtime 342
351#define __NR_syncfs 343
352#define __NR_setns 344
353#define __NR_process_vm_readv 345
354#define __NR_process_vm_writev 346
355 5
356#ifdef __KERNEL__
357 6
358#define NR_syscalls 347 7#define NR_syscalls 348
359 8
360#define __ARCH_WANT_OLD_READDIR 9#define __ARCH_WANT_OLD_READDIR
361#define __ARCH_WANT_OLD_STAT 10#define __ARCH_WANT_OLD_STAT
@@ -393,5 +42,4 @@
393 */ 42 */
394#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall") 43#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
395 44
396#endif /* __KERNEL__ */
397#endif /* _ASM_M68K_UNISTD_H_ */ 45#endif /* _ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/include/uapi/asm/Kbuild b/arch/m68k/include/uapi/asm/Kbuild
index baebb3da1d44..972bce120e1e 100644
--- a/arch/m68k/include/uapi/asm/Kbuild
+++ b/arch/m68k/include/uapi/asm/Kbuild
@@ -1,3 +1,26 @@
1# UAPI Header export list 1# UAPI Header export list
2include include/uapi/asm-generic/Kbuild.asm 2include include/uapi/asm-generic/Kbuild.asm
3 3
4header-y += a.out.h
5header-y += auxvec.h
6header-y += byteorder.h
7header-y += cachectl.h
8header-y += fcntl.h
9header-y += ioctls.h
10header-y += msgbuf.h
11header-y += param.h
12header-y += poll.h
13header-y += posix_types.h
14header-y += ptrace.h
15header-y += sembuf.h
16header-y += setup.h
17header-y += shmbuf.h
18header-y += sigcontext.h
19header-y += signal.h
20header-y += socket.h
21header-y += sockios.h
22header-y += stat.h
23header-y += swab.h
24header-y += termbits.h
25header-y += termios.h
26header-y += unistd.h
diff --git a/arch/m68k/include/asm/a.out.h b/arch/m68k/include/uapi/asm/a.out.h
index 3885fe43432a..3885fe43432a 100644
--- a/arch/m68k/include/asm/a.out.h
+++ b/arch/m68k/include/uapi/asm/a.out.h
diff --git a/arch/m68k/include/asm/auxvec.h b/arch/m68k/include/uapi/asm/auxvec.h
index 844d6d52204b..844d6d52204b 100644
--- a/arch/m68k/include/asm/auxvec.h
+++ b/arch/m68k/include/uapi/asm/auxvec.h
diff --git a/arch/m68k/include/asm/byteorder.h b/arch/m68k/include/uapi/asm/byteorder.h
index 31b260a88803..31b260a88803 100644
--- a/arch/m68k/include/asm/byteorder.h
+++ b/arch/m68k/include/uapi/asm/byteorder.h
diff --git a/arch/m68k/include/asm/cachectl.h b/arch/m68k/include/uapi/asm/cachectl.h
index 525978e959e3..525978e959e3 100644
--- a/arch/m68k/include/asm/cachectl.h
+++ b/arch/m68k/include/uapi/asm/cachectl.h
diff --git a/arch/m68k/include/asm/fcntl.h b/arch/m68k/include/uapi/asm/fcntl.h
index 1c369b20dc45..1c369b20dc45 100644
--- a/arch/m68k/include/asm/fcntl.h
+++ b/arch/m68k/include/uapi/asm/fcntl.h
diff --git a/arch/m68k/include/asm/ioctls.h b/arch/m68k/include/uapi/asm/ioctls.h
index 1332bb4ca5b0..1332bb4ca5b0 100644
--- a/arch/m68k/include/asm/ioctls.h
+++ b/arch/m68k/include/uapi/asm/ioctls.h
diff --git a/arch/m68k/include/asm/msgbuf.h b/arch/m68k/include/uapi/asm/msgbuf.h
index 243cb798de8f..243cb798de8f 100644
--- a/arch/m68k/include/asm/msgbuf.h
+++ b/arch/m68k/include/uapi/asm/msgbuf.h
diff --git a/arch/m68k/include/asm/param.h b/arch/m68k/include/uapi/asm/param.h
index 36265ccf5c7b..36265ccf5c7b 100644
--- a/arch/m68k/include/asm/param.h
+++ b/arch/m68k/include/uapi/asm/param.h
diff --git a/arch/m68k/include/asm/poll.h b/arch/m68k/include/uapi/asm/poll.h
index f080fcdb61bf..f080fcdb61bf 100644
--- a/arch/m68k/include/asm/poll.h
+++ b/arch/m68k/include/uapi/asm/poll.h
diff --git a/arch/m68k/include/asm/posix_types.h b/arch/m68k/include/uapi/asm/posix_types.h
index cf4dbf70fdc7..cf4dbf70fdc7 100644
--- a/arch/m68k/include/asm/posix_types.h
+++ b/arch/m68k/include/uapi/asm/posix_types.h
diff --git a/arch/m68k/include/uapi/asm/ptrace.h b/arch/m68k/include/uapi/asm/ptrace.h
new file mode 100644
index 000000000000..caf92fd34939
--- /dev/null
+++ b/arch/m68k/include/uapi/asm/ptrace.h
@@ -0,0 +1,79 @@
1#ifndef _UAPI_M68K_PTRACE_H
2#define _UAPI_M68K_PTRACE_H
3
4#define PT_D1 0
5#define PT_D2 1
6#define PT_D3 2
7#define PT_D4 3
8#define PT_D5 4
9#define PT_D6 5
10#define PT_D7 6
11#define PT_A0 7
12#define PT_A1 8
13#define PT_A2 9
14#define PT_A3 10
15#define PT_A4 11
16#define PT_A5 12
17#define PT_A6 13
18#define PT_D0 14
19#define PT_USP 15
20#define PT_ORIG_D0 16
21#define PT_SR 17
22#define PT_PC 18
23
24#ifndef __ASSEMBLY__
25
26/* this struct defines the way the registers are stored on the
27 stack during a system call. */
28
29struct pt_regs {
30 long d1;
31 long d2;
32 long d3;
33 long d4;
34 long d5;
35 long a0;
36 long a1;
37 long a2;
38 long d0;
39 long orig_d0;
40 long stkadj;
41#ifdef CONFIG_COLDFIRE
42 unsigned format : 4; /* frame format specifier */
43 unsigned vector : 12; /* vector offset */
44 unsigned short sr;
45 unsigned long pc;
46#else
47 unsigned short sr;
48 unsigned long pc;
49 unsigned format : 4; /* frame format specifier */
50 unsigned vector : 12; /* vector offset */
51#endif
52};
53
54/*
55 * This is the extended stack used by signal handlers and the context
56 * switcher: it's pushed after the normal "struct pt_regs".
57 */
58struct switch_stack {
59 unsigned long d6;
60 unsigned long d7;
61 unsigned long a3;
62 unsigned long a4;
63 unsigned long a5;
64 unsigned long a6;
65 unsigned long retpc;
66};
67
68/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
69#define PTRACE_GETREGS 12
70#define PTRACE_SETREGS 13
71#define PTRACE_GETFPREGS 14
72#define PTRACE_SETFPREGS 15
73
74#define PTRACE_GET_THREAD_AREA 25
75
76#define PTRACE_SINGLEBLOCK 33 /* resume execution until next branch */
77
78#endif /* __ASSEMBLY__ */
79#endif /* _UAPI_M68K_PTRACE_H */
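
The PT_* indices above give each register's position in the traced child's register block. A minimal tracer sketch follows; it assumes the m68k PTRACE_PEEKUSER convention of passing the register index scaled to a byte offset (index << 2), and the helper is hypothetical, not part of this patch.

#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/ptrace.h>		/* PT_PC, PT_SR, PT_D0, ... */

/* Hypothetical helper, not from this patch: read the traced child's PC.
 * Assumes PEEKUSER takes the PT_* index shifted left by two as the addr
 * argument; verify against arch/m68k/kernel/ptrace.c before relying on it. */
static long peek_pc(pid_t child)
{
	return ptrace(PTRACE_PEEKUSER, child, (void *)(PT_PC << 2), 0L);
}
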
diff --git a/arch/m68k/include/asm/sembuf.h b/arch/m68k/include/uapi/asm/sembuf.h
index 2308052a8c24..2308052a8c24 100644
--- a/arch/m68k/include/asm/sembuf.h
+++ b/arch/m68k/include/uapi/asm/sembuf.h
diff --git a/arch/m68k/include/uapi/asm/setup.h b/arch/m68k/include/uapi/asm/setup.h
new file mode 100644
index 000000000000..85579bff455c
--- /dev/null
+++ b/arch/m68k/include/uapi/asm/setup.h
@@ -0,0 +1,103 @@
1/*
2** asm/setup.h -- Definition of the Linux/m68k setup information
3**
4** Copyright 1992 by Greg Harp
5**
6** This file is subject to the terms and conditions of the GNU General Public
7** License. See the file COPYING in the main directory of this archive
8** for more details.
9**
10** Created 09/29/92 by Greg Harp
11**
12** 5/2/94 Roman Hodek:
13** Added bi_atari part of the machine dependent union bi_un; for now it
14** contains just a model field to distinguish between TT and Falcon.
15** 26/7/96 Roman Zippel:
16** Renamed to setup.h; added some useful macros to allow gcc some
17** optimizations if possible.
18** 5/10/96 Geert Uytterhoeven:
19** Redesign of the boot information structure; moved boot information
20** structure to bootinfo.h
21*/
22
23#ifndef _UAPI_M68K_SETUP_H
24#define _UAPI_M68K_SETUP_H
25
26
27
28 /*
29 * Linux/m68k Architectures
30 */
31
32#define MACH_AMIGA 1
33#define MACH_ATARI 2
34#define MACH_MAC 3
35#define MACH_APOLLO 4
36#define MACH_SUN3 5
37#define MACH_MVME147 6
38#define MACH_MVME16x 7
39#define MACH_BVME6000 8
40#define MACH_HP300 9
41#define MACH_Q40 10
42#define MACH_SUN3X 11
43#define MACH_M54XX 12
44
45#define COMMAND_LINE_SIZE 256
46
47
48
49 /*
50 * CPU, FPU and MMU types
51 *
52 * Note: we may rely on the following equalities:
53 *
54 * CPU_68020 == MMU_68851
55 * CPU_68030 == MMU_68030
56 * CPU_68040 == FPU_68040 == MMU_68040
57 * CPU_68060 == FPU_68060 == MMU_68060
58 */
59
60#define CPUB_68020 0
61#define CPUB_68030 1
62#define CPUB_68040 2
63#define CPUB_68060 3
64#define CPUB_COLDFIRE 4
65
66#define CPU_68020 (1<<CPUB_68020)
67#define CPU_68030 (1<<CPUB_68030)
68#define CPU_68040 (1<<CPUB_68040)
69#define CPU_68060 (1<<CPUB_68060)
70#define CPU_COLDFIRE (1<<CPUB_COLDFIRE)
71
72#define FPUB_68881 0
73#define FPUB_68882 1
74#define FPUB_68040 2 /* Internal FPU */
75#define FPUB_68060 3 /* Internal FPU */
76#define FPUB_SUNFPA 4 /* Sun-3 FPA */
77#define FPUB_COLDFIRE 5 /* ColdFire FPU */
78
79#define FPU_68881 (1<<FPUB_68881)
80#define FPU_68882 (1<<FPUB_68882)
81#define FPU_68040 (1<<FPUB_68040)
82#define FPU_68060 (1<<FPUB_68060)
83#define FPU_SUNFPA (1<<FPUB_SUNFPA)
84#define FPU_COLDFIRE (1<<FPUB_COLDFIRE)
85
86#define MMUB_68851 0
87#define MMUB_68030 1 /* Internal MMU */
88#define MMUB_68040 2 /* Internal MMU */
89#define MMUB_68060 3 /* Internal MMU */
90#define MMUB_APOLLO 4 /* Custom Apollo */
91#define MMUB_SUN3 5 /* Custom Sun-3 */
92#define MMUB_COLDFIRE 6 /* Internal MMU */
93
94#define MMU_68851 (1<<MMUB_68851)
95#define MMU_68030 (1<<MMUB_68030)
96#define MMU_68040 (1<<MMUB_68040)
97#define MMU_68060 (1<<MMUB_68060)
98#define MMU_SUN3 (1<<MMUB_SUN3)
99#define MMU_APOLLO (1<<MMUB_APOLLO)
100#define MMU_COLDFIRE (1<<MMUB_COLDFIRE)
101
102
103#endif /* _UAPI_M68K_SETUP_H */
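
The CPU_*, FPU_* and MMU_* values are bit masks built from the *B_* bit numbers, so a single word can describe a combination, and the equalities noted in the comment (e.g. CPU_68040 == FPU_68040 == MMU_68040) hold by construction. A kernel-side sketch of how a consumer might test them; the function and the pr_info() messages are illustrative only and not part of this patch.

#include <linux/init.h>
#include <linux/printk.h>
#include <asm/setup.h>		/* CPU_*, MMU_* masks; m68k_cputype et al. */

static void __init report_m68k_cpu(void)
{
	/* m68k_cputype/m68k_mmutype are declared in the __KERNEL__ half of
	 * <asm/setup.h> and filled in from the bootinfo at startup. */
	if (m68k_cputype & CPU_68040)
		pr_info("68040-class CPU (implies FPU_68040 and MMU_68040)\n");
	else if (m68k_cputype & CPU_COLDFIRE)
		pr_info("ColdFire CPU\n");

	if (m68k_mmutype & MMU_68851)
		pr_info("external 68851 PMMU\n");
}
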
diff --git a/arch/m68k/include/asm/shmbuf.h b/arch/m68k/include/uapi/asm/shmbuf.h
index f8928d62f1b7..f8928d62f1b7 100644
--- a/arch/m68k/include/asm/shmbuf.h
+++ b/arch/m68k/include/uapi/asm/shmbuf.h
diff --git a/arch/m68k/include/asm/sigcontext.h b/arch/m68k/include/uapi/asm/sigcontext.h
index 523db2a51cf3..523db2a51cf3 100644
--- a/arch/m68k/include/asm/sigcontext.h
+++ b/arch/m68k/include/uapi/asm/sigcontext.h
diff --git a/arch/m68k/include/uapi/asm/signal.h b/arch/m68k/include/uapi/asm/signal.h
new file mode 100644
index 000000000000..2b450f311bd9
--- /dev/null
+++ b/arch/m68k/include/uapi/asm/signal.h
@@ -0,0 +1,118 @@
1#ifndef _UAPI_M68K_SIGNAL_H
2#define _UAPI_M68K_SIGNAL_H
3
4#include <linux/types.h>
5
6/* Avoid too many header ordering problems. */
7struct siginfo;
8
9#ifndef __KERNEL__
10/* Here we must cater to libcs that poke about in kernel headers. */
11
12#define NSIG 32
13typedef unsigned long sigset_t;
14
15#endif /* __KERNEL__ */
16
17#define SIGHUP 1
18#define SIGINT 2
19#define SIGQUIT 3
20#define SIGILL 4
21#define SIGTRAP 5
22#define SIGABRT 6
23#define SIGIOT 6
24#define SIGBUS 7
25#define SIGFPE 8
26#define SIGKILL 9
27#define SIGUSR1 10
28#define SIGSEGV 11
29#define SIGUSR2 12
30#define SIGPIPE 13
31#define SIGALRM 14
32#define SIGTERM 15
33#define SIGSTKFLT 16
34#define SIGCHLD 17
35#define SIGCONT 18
36#define SIGSTOP 19
37#define SIGTSTP 20
38#define SIGTTIN 21
39#define SIGTTOU 22
40#define SIGURG 23
41#define SIGXCPU 24
42#define SIGXFSZ 25
43#define SIGVTALRM 26
44#define SIGPROF 27
45#define SIGWINCH 28
46#define SIGIO 29
47#define SIGPOLL SIGIO
48/*
49#define SIGLOST 29
50*/
51#define SIGPWR 30
52#define SIGSYS 31
53#define SIGUNUSED 31
54
55/* These should not be considered constants from userland. */
56#define SIGRTMIN 32
57#define SIGRTMAX _NSIG
58
59/*
60 * SA_FLAGS values:
61 *
62 * SA_ONSTACK indicates that a registered stack_t will be used.
63 * SA_RESTART flag to get restarting signals (which were the default long ago)
64 * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
65 * SA_RESETHAND clears the handler when the signal is delivered.
66 * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
67 * SA_NODEFER prevents the current signal from being masked in the handler.
68 *
69 * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
70 * Unix names RESETHAND and NODEFER respectively.
71 */
72#define SA_NOCLDSTOP 0x00000001
73#define SA_NOCLDWAIT 0x00000002
74#define SA_SIGINFO 0x00000004
75#define SA_ONSTACK 0x08000000
76#define SA_RESTART 0x10000000
77#define SA_NODEFER 0x40000000
78#define SA_RESETHAND 0x80000000
79
80#define SA_NOMASK SA_NODEFER
81#define SA_ONESHOT SA_RESETHAND
82
83/*
84 * sigaltstack controls
85 */
86#define SS_ONSTACK 1
87#define SS_DISABLE 2
88
89#define MINSIGSTKSZ 2048
90#define SIGSTKSZ 8192
91
92#include <asm-generic/signal-defs.h>
93
94#ifndef __KERNEL__
95/* Here we must cater to libcs that poke about in kernel headers. */
96
97struct sigaction {
98 union {
99 __sighandler_t _sa_handler;
100 void (*_sa_sigaction)(int, struct siginfo *, void *);
101 } _u;
102 sigset_t sa_mask;
103 unsigned long sa_flags;
104 void (*sa_restorer)(void);
105};
106
107#define sa_handler _u._sa_handler
108#define sa_sigaction _u._sa_sigaction
109
110#endif /* __KERNEL__ */
111
112typedef struct sigaltstack {
113 void __user *ss_sp;
114 int ss_flags;
115 size_t ss_size;
116} stack_t;
117
118#endif /* _UAPI_M68K_SIGNAL_H */
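
The SA_* flags and the SS_*/SIGSTKSZ constants above mirror what userspace sees through <signal.h>. A minimal sketch of a handler installation that exercises them, assuming a hosted libc; the handler body and the choice of SIGCHLD are illustrative, not taken from this patch.

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void on_child(int sig, siginfo_t *info, void *ucontext)
{
	/* Only async-signal-safe work belongs here. */
	(void)sig; (void)info; (void)ucontext;
}

int main(void)
{
	stack_t ss = { .ss_sp = malloc(SIGSTKSZ), .ss_size = SIGSTKSZ, .ss_flags = 0 };
	struct sigaction sa;

	if (!ss.ss_sp || sigaltstack(&ss, NULL) < 0)
		perror("sigaltstack");

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = on_child;
	sigemptyset(&sa.sa_mask);
	/* SA_SIGINFO: three-argument handler; SA_RESTART: restart interrupted
	 * syscalls; SA_ONSTACK: deliver on the alternate stack set up above. */
	sa.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
	if (sigaction(SIGCHLD, &sa, NULL) < 0)
		perror("sigaction");
	return 0;
}
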
diff --git a/arch/m68k/include/asm/socket.h b/arch/m68k/include/uapi/asm/socket.h
index d1be684edf97..d1be684edf97 100644
--- a/arch/m68k/include/asm/socket.h
+++ b/arch/m68k/include/uapi/asm/socket.h
diff --git a/arch/m68k/include/asm/sockios.h b/arch/m68k/include/uapi/asm/sockios.h
index c04a23943cb7..c04a23943cb7 100644
--- a/arch/m68k/include/asm/sockios.h
+++ b/arch/m68k/include/uapi/asm/sockios.h
diff --git a/arch/m68k/include/asm/stat.h b/arch/m68k/include/uapi/asm/stat.h
index dd38bc2e9f98..dd38bc2e9f98 100644
--- a/arch/m68k/include/asm/stat.h
+++ b/arch/m68k/include/uapi/asm/stat.h
diff --git a/arch/m68k/include/asm/swab.h b/arch/m68k/include/uapi/asm/swab.h
index b7b37a40defc..b7b37a40defc 100644
--- a/arch/m68k/include/asm/swab.h
+++ b/arch/m68k/include/uapi/asm/swab.h
diff --git a/arch/m68k/include/asm/termbits.h b/arch/m68k/include/uapi/asm/termbits.h
index aea1e37b765a..aea1e37b765a 100644
--- a/arch/m68k/include/asm/termbits.h
+++ b/arch/m68k/include/uapi/asm/termbits.h
diff --git a/arch/m68k/include/uapi/asm/termios.h b/arch/m68k/include/uapi/asm/termios.h
new file mode 100644
index 000000000000..ce2142c9ac1d
--- /dev/null
+++ b/arch/m68k/include/uapi/asm/termios.h
@@ -0,0 +1,44 @@
1#ifndef _UAPI_M68K_TERMIOS_H
2#define _UAPI_M68K_TERMIOS_H
3
4#include <asm/termbits.h>
5#include <asm/ioctls.h>
6
7struct winsize {
8 unsigned short ws_row;
9 unsigned short ws_col;
10 unsigned short ws_xpixel;
11 unsigned short ws_ypixel;
12};
13
14#define NCC 8
15struct termio {
16 unsigned short c_iflag; /* input mode flags */
17 unsigned short c_oflag; /* output mode flags */
18 unsigned short c_cflag; /* control mode flags */
19 unsigned short c_lflag; /* local mode flags */
20 unsigned char c_line; /* line discipline */
21 unsigned char c_cc[NCC]; /* control characters */
22};
23
24
25/* modem lines */
26#define TIOCM_LE 0x001
27#define TIOCM_DTR 0x002
28#define TIOCM_RTS 0x004
29#define TIOCM_ST 0x008
30#define TIOCM_SR 0x010
31#define TIOCM_CTS 0x020
32#define TIOCM_CAR 0x040
33#define TIOCM_RNG 0x080
34#define TIOCM_DSR 0x100
35#define TIOCM_CD TIOCM_CAR
36#define TIOCM_RI TIOCM_RNG
37#define TIOCM_OUT1 0x2000
38#define TIOCM_OUT2 0x4000
39#define TIOCM_LOOP 0x8000
40
41/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
42
43
44#endif /* _UAPI_M68K_TERMIOS_H */
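
The TIOCM_* bits are read and written through the TIOCMGET/TIOCMSET ioctls. A small userspace sketch; the device path /dev/ttyS0 is an assumption for illustration and not part of this patch.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
	int status = 0;
	int fd = open("/dev/ttyS0", O_RDONLY | O_NOCTTY | O_NONBLOCK);

	if (fd < 0 || ioctl(fd, TIOCMGET, &status) < 0) {
		perror("TIOCMGET");
		return 1;
	}
	/* Each TIOCM_* constant is one modem-control line. */
	printf("DTR=%d RTS=%d CD=%d DSR=%d\n",
	       !!(status & TIOCM_DTR), !!(status & TIOCM_RTS),
	       !!(status & TIOCM_CAR), !!(status & TIOCM_DSR));
	close(fd);
	return 0;
}
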
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h
new file mode 100644
index 000000000000..b94bfbf90705
--- /dev/null
+++ b/arch/m68k/include/uapi/asm/unistd.h
@@ -0,0 +1,357 @@
1#ifndef _UAPI_ASM_M68K_UNISTD_H_
2#define _UAPI_ASM_M68K_UNISTD_H_
3
4/*
5 * This file contains the system call numbers.
6 */
7
8#define __NR_restart_syscall 0
9#define __NR_exit 1
10#define __NR_fork 2
11#define __NR_read 3
12#define __NR_write 4
13#define __NR_open 5
14#define __NR_close 6
15#define __NR_waitpid 7
16#define __NR_creat 8
17#define __NR_link 9
18#define __NR_unlink 10
19#define __NR_execve 11
20#define __NR_chdir 12
21#define __NR_time 13
22#define __NR_mknod 14
23#define __NR_chmod 15
24#define __NR_chown 16
25/*#define __NR_break 17*/
26#define __NR_oldstat 18
27#define __NR_lseek 19
28#define __NR_getpid 20
29#define __NR_mount 21
30#define __NR_umount 22
31#define __NR_setuid 23
32#define __NR_getuid 24
33#define __NR_stime 25
34#define __NR_ptrace 26
35#define __NR_alarm 27
36#define __NR_oldfstat 28
37#define __NR_pause 29
38#define __NR_utime 30
39/*#define __NR_stty 31*/
40/*#define __NR_gtty 32*/
41#define __NR_access 33
42#define __NR_nice 34
43/*#define __NR_ftime 35*/
44#define __NR_sync 36
45#define __NR_kill 37
46#define __NR_rename 38
47#define __NR_mkdir 39
48#define __NR_rmdir 40
49#define __NR_dup 41
50#define __NR_pipe 42
51#define __NR_times 43
52/*#define __NR_prof 44*/
53#define __NR_brk 45
54#define __NR_setgid 46
55#define __NR_getgid 47
56#define __NR_signal 48
57#define __NR_geteuid 49
58#define __NR_getegid 50
59#define __NR_acct 51
60#define __NR_umount2 52
61/*#define __NR_lock 53*/
62#define __NR_ioctl 54
63#define __NR_fcntl 55
64/*#define __NR_mpx 56*/
65#define __NR_setpgid 57
66/*#define __NR_ulimit 58*/
67/*#define __NR_oldolduname 59*/
68#define __NR_umask 60
69#define __NR_chroot 61
70#define __NR_ustat 62
71#define __NR_dup2 63
72#define __NR_getppid 64
73#define __NR_getpgrp 65
74#define __NR_setsid 66
75#define __NR_sigaction 67
76#define __NR_sgetmask 68
77#define __NR_ssetmask 69
78#define __NR_setreuid 70
79#define __NR_setregid 71
80#define __NR_sigsuspend 72
81#define __NR_sigpending 73
82#define __NR_sethostname 74
83#define __NR_setrlimit 75
84#define __NR_getrlimit 76
85#define __NR_getrusage 77
86#define __NR_gettimeofday 78
87#define __NR_settimeofday 79
88#define __NR_getgroups 80
89#define __NR_setgroups 81
90#define __NR_select 82
91#define __NR_symlink 83
92#define __NR_oldlstat 84
93#define __NR_readlink 85
94#define __NR_uselib 86
95#define __NR_swapon 87
96#define __NR_reboot 88
97#define __NR_readdir 89
98#define __NR_mmap 90
99#define __NR_munmap 91
100#define __NR_truncate 92
101#define __NR_ftruncate 93
102#define __NR_fchmod 94
103#define __NR_fchown 95
104#define __NR_getpriority 96
105#define __NR_setpriority 97
106/*#define __NR_profil 98*/
107#define __NR_statfs 99
108#define __NR_fstatfs 100
109/*#define __NR_ioperm 101*/
110#define __NR_socketcall 102
111#define __NR_syslog 103
112#define __NR_setitimer 104
113#define __NR_getitimer 105
114#define __NR_stat 106
115#define __NR_lstat 107
116#define __NR_fstat 108
117/*#define __NR_olduname 109*/
118/*#define __NR_iopl 110*/ /* not supported */
119#define __NR_vhangup 111
120/*#define __NR_idle 112*/ /* Obsolete */
121/*#define __NR_vm86 113*/ /* not supported */
122#define __NR_wait4 114
123#define __NR_swapoff 115
124#define __NR_sysinfo 116
125#define __NR_ipc 117
126#define __NR_fsync 118
127#define __NR_sigreturn 119
128#define __NR_clone 120
129#define __NR_setdomainname 121
130#define __NR_uname 122
131#define __NR_cacheflush 123
132#define __NR_adjtimex 124
133#define __NR_mprotect 125
134#define __NR_sigprocmask 126
135#define __NR_create_module 127
136#define __NR_init_module 128
137#define __NR_delete_module 129
138#define __NR_get_kernel_syms 130
139#define __NR_quotactl 131
140#define __NR_getpgid 132
141#define __NR_fchdir 133
142#define __NR_bdflush 134
143#define __NR_sysfs 135
144#define __NR_personality 136
145/*#define __NR_afs_syscall 137*/ /* Syscall for Andrew File System */
146#define __NR_setfsuid 138
147#define __NR_setfsgid 139
148#define __NR__llseek 140
149#define __NR_getdents 141
150#define __NR__newselect 142
151#define __NR_flock 143
152#define __NR_msync 144
153#define __NR_readv 145
154#define __NR_writev 146
155#define __NR_getsid 147
156#define __NR_fdatasync 148
157#define __NR__sysctl 149
158#define __NR_mlock 150
159#define __NR_munlock 151
160#define __NR_mlockall 152
161#define __NR_munlockall 153
162#define __NR_sched_setparam 154
163#define __NR_sched_getparam 155
164#define __NR_sched_setscheduler 156
165#define __NR_sched_getscheduler 157
166#define __NR_sched_yield 158
167#define __NR_sched_get_priority_max 159
168#define __NR_sched_get_priority_min 160
169#define __NR_sched_rr_get_interval 161
170#define __NR_nanosleep 162
171#define __NR_mremap 163
172#define __NR_setresuid 164
173#define __NR_getresuid 165
174#define __NR_getpagesize 166
175#define __NR_query_module 167
176#define __NR_poll 168
177#define __NR_nfsservctl 169
178#define __NR_setresgid 170
179#define __NR_getresgid 171
180#define __NR_prctl 172
181#define __NR_rt_sigreturn 173
182#define __NR_rt_sigaction 174
183#define __NR_rt_sigprocmask 175
184#define __NR_rt_sigpending 176
185#define __NR_rt_sigtimedwait 177
186#define __NR_rt_sigqueueinfo 178
187#define __NR_rt_sigsuspend 179
188#define __NR_pread64 180
189#define __NR_pwrite64 181
190#define __NR_lchown 182
191#define __NR_getcwd 183
192#define __NR_capget 184
193#define __NR_capset 185
194#define __NR_sigaltstack 186
195#define __NR_sendfile 187
196#define __NR_getpmsg 188 /* some people actually want streams */
197#define __NR_putpmsg 189 /* some people actually want streams */
198#define __NR_vfork 190
199#define __NR_ugetrlimit 191
200#define __NR_mmap2 192
201#define __NR_truncate64 193
202#define __NR_ftruncate64 194
203#define __NR_stat64 195
204#define __NR_lstat64 196
205#define __NR_fstat64 197
206#define __NR_chown32 198
207#define __NR_getuid32 199
208#define __NR_getgid32 200
209#define __NR_geteuid32 201
210#define __NR_getegid32 202
211#define __NR_setreuid32 203
212#define __NR_setregid32 204
213#define __NR_getgroups32 205
214#define __NR_setgroups32 206
215#define __NR_fchown32 207
216#define __NR_setresuid32 208
217#define __NR_getresuid32 209
218#define __NR_setresgid32 210
219#define __NR_getresgid32 211
220#define __NR_lchown32 212
221#define __NR_setuid32 213
222#define __NR_setgid32 214
223#define __NR_setfsuid32 215
224#define __NR_setfsgid32 216
225#define __NR_pivot_root 217
226/* 218*/
227/* 219*/
228#define __NR_getdents64 220
229#define __NR_gettid 221
230#define __NR_tkill 222
231#define __NR_setxattr 223
232#define __NR_lsetxattr 224
233#define __NR_fsetxattr 225
234#define __NR_getxattr 226
235#define __NR_lgetxattr 227
236#define __NR_fgetxattr 228
237#define __NR_listxattr 229
238#define __NR_llistxattr 230
239#define __NR_flistxattr 231
240#define __NR_removexattr 232
241#define __NR_lremovexattr 233
242#define __NR_fremovexattr 234
243#define __NR_futex 235
244#define __NR_sendfile64 236
245#define __NR_mincore 237
246#define __NR_madvise 238
247#define __NR_fcntl64 239
248#define __NR_readahead 240
249#define __NR_io_setup 241
250#define __NR_io_destroy 242
251#define __NR_io_getevents 243
252#define __NR_io_submit 244
253#define __NR_io_cancel 245
254#define __NR_fadvise64 246
255#define __NR_exit_group 247
256#define __NR_lookup_dcookie 248
257#define __NR_epoll_create 249
258#define __NR_epoll_ctl 250
259#define __NR_epoll_wait 251
260#define __NR_remap_file_pages 252
261#define __NR_set_tid_address 253
262#define __NR_timer_create 254
263#define __NR_timer_settime 255
264#define __NR_timer_gettime 256
265#define __NR_timer_getoverrun 257
266#define __NR_timer_delete 258
267#define __NR_clock_settime 259
268#define __NR_clock_gettime 260
269#define __NR_clock_getres 261
270#define __NR_clock_nanosleep 262
271#define __NR_statfs64 263
272#define __NR_fstatfs64 264
273#define __NR_tgkill 265
274#define __NR_utimes 266
275#define __NR_fadvise64_64 267
276#define __NR_mbind 268
277#define __NR_get_mempolicy 269
278#define __NR_set_mempolicy 270
279#define __NR_mq_open 271
280#define __NR_mq_unlink 272
281#define __NR_mq_timedsend 273
282#define __NR_mq_timedreceive 274
283#define __NR_mq_notify 275
284#define __NR_mq_getsetattr 276
285#define __NR_waitid 277
286/*#define __NR_vserver 278*/
287#define __NR_add_key 279
288#define __NR_request_key 280
289#define __NR_keyctl 281
290#define __NR_ioprio_set 282
291#define __NR_ioprio_get 283
292#define __NR_inotify_init 284
293#define __NR_inotify_add_watch 285
294#define __NR_inotify_rm_watch 286
295#define __NR_migrate_pages 287
296#define __NR_openat 288
297#define __NR_mkdirat 289
298#define __NR_mknodat 290
299#define __NR_fchownat 291
300#define __NR_futimesat 292
301#define __NR_fstatat64 293
302#define __NR_unlinkat 294
303#define __NR_renameat 295
304#define __NR_linkat 296
305#define __NR_symlinkat 297
306#define __NR_readlinkat 298
307#define __NR_fchmodat 299
308#define __NR_faccessat 300
309#define __NR_pselect6 301
310#define __NR_ppoll 302
311#define __NR_unshare 303
312#define __NR_set_robust_list 304
313#define __NR_get_robust_list 305
314#define __NR_splice 306
315#define __NR_sync_file_range 307
316#define __NR_tee 308
317#define __NR_vmsplice 309
318#define __NR_move_pages 310
319#define __NR_sched_setaffinity 311
320#define __NR_sched_getaffinity 312
321#define __NR_kexec_load 313
322#define __NR_getcpu 314
323#define __NR_epoll_pwait 315
324#define __NR_utimensat 316
325#define __NR_signalfd 317
326#define __NR_timerfd_create 318
327#define __NR_eventfd 319
328#define __NR_fallocate 320
329#define __NR_timerfd_settime 321
330#define __NR_timerfd_gettime 322
331#define __NR_signalfd4 323
332#define __NR_eventfd2 324
333#define __NR_epoll_create1 325
334#define __NR_dup3 326
335#define __NR_pipe2 327
336#define __NR_inotify_init1 328
337#define __NR_preadv 329
338#define __NR_pwritev 330
339#define __NR_rt_tgsigqueueinfo 331
340#define __NR_perf_event_open 332
341#define __NR_get_thread_area 333
342#define __NR_set_thread_area 334
343#define __NR_atomic_cmpxchg_32 335
344#define __NR_atomic_barrier 336
345#define __NR_fanotify_init 337
346#define __NR_fanotify_mark 338
347#define __NR_prlimit64 339
348#define __NR_name_to_handle_at 340
349#define __NR_open_by_handle_at 341
350#define __NR_clock_adjtime 342
351#define __NR_syncfs 343
352#define __NR_setns 344
353#define __NR_process_vm_readv 345
354#define __NR_process_vm_writev 346
355#define __NR_kcmp 347
356
357#endif /* _UAPI_ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index ce827b376110..4fc2e29b771b 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -367,4 +367,5 @@ ENTRY(sys_call_table)
367 .long sys_setns 367 .long sys_setns
368 .long sys_process_vm_readv /* 345 */ 368 .long sys_process_vm_readv /* 345 */
369 .long sys_process_vm_writev 369 .long sys_process_vm_writev
370 .long sys_kcmp
370 371
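
The new table entry wires syscall 347 (__NR_kcmp from the uapi unistd.h above) to sys_kcmp, and NR_syscalls grows to 348 to match. A minimal userspace sketch of invoking it, assuming a kernel built with CONFIG_CHECKPOINT_RESTORE and a libc without a kcmp() wrapper (hence the raw syscall()).

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/kcmp.h>		/* KCMP_FILE, KCMP_VM, ... */

int main(void)
{
	pid_t self = getpid();
	/* Ask whether fd 1 and fd 2 of this process share an open file
	 * description; kcmp() returns 0 for "equal", 1/2/3 for an ordering,
	 * and -1 with errno set on failure. */
	long ret = syscall(__NR_kcmp, self, self, KCMP_FILE, 1, 2);

	if (ret < 0)
		perror("kcmp");
	else
		printf("kcmp(KCMP_FILE, 1, 2) = %ld\n", ret);
	return 0;
}
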
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 3f3d9ca7a5b6..5dba755a43e6 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -130,6 +130,7 @@ config S390
130 select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE 130 select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
131 select HAVE_UID16 if 32BIT 131 select HAVE_UID16 if 32BIT
132 select ARCH_WANT_IPC_PARSE_VERSION 132 select ARCH_WANT_IPC_PARSE_VERSION
133 select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT
133 select GENERIC_SMP_IDLE_THREAD 134 select GENERIC_SMP_IDLE_THREAD
134 select GENERIC_TIME_VSYSCALL_OLD 135 select GENERIC_TIME_VSYSCALL_OLD
135 select GENERIC_CLOCKEVENTS 136 select GENERIC_CLOCKEVENTS
diff --git a/arch/s390/boot/compressed/vmlinux.lds.S b/arch/s390/boot/compressed/vmlinux.lds.S
index d80f79d8dd9c..8e1fb8239287 100644
--- a/arch/s390/boot/compressed/vmlinux.lds.S
+++ b/arch/s390/boot/compressed/vmlinux.lds.S
@@ -5,7 +5,7 @@ OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
5OUTPUT_ARCH(s390:64-bit) 5OUTPUT_ARCH(s390:64-bit)
6#else 6#else
7OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390") 7OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
8OUTPUT_ARCH(s390) 8OUTPUT_ARCH(s390:31-bit)
9#endif 9#endif
10 10
11ENTRY(startup) 11ENTRY(startup)
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index 7941968e12b4..5f0173a31693 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -9,7 +9,7 @@
9#include <asm/cpu_mf.h> 9#include <asm/cpu_mf.h>
10 10
11/* CPU-measurement counter facility */ 11/* CPU-measurement counter facility */
12#define PERF_CPUM_CF_MAX_CTR 160 12#define PERF_CPUM_CF_MAX_CTR 256
13 13
14/* Per-CPU flags for PMU states */ 14/* Per-CPU flags for PMU states */
15#define PMU_F_RESERVED 0x1000 15#define PMU_F_RESERVED 0x1000
diff --git a/arch/s390/include/uapi/asm/Kbuild b/arch/s390/include/uapi/asm/Kbuild
index 59b67ed423b4..7bf68fff7c5d 100644
--- a/arch/s390/include/uapi/asm/Kbuild
+++ b/arch/s390/include/uapi/asm/Kbuild
@@ -1,8 +1,6 @@
1# UAPI Header export list 1# UAPI Header export list
2include include/uapi/asm-generic/Kbuild.asm 2include include/uapi/asm-generic/Kbuild.asm
3 3
4generic-y += kvm_para.h
5
6header-y += auxvec.h 4header-y += auxvec.h
7header-y += bitsperlong.h 5header-y += bitsperlong.h
8header-y += byteorder.h 6header-y += byteorder.h
diff --git a/arch/s390/include/uapi/asm/chpid.h b/arch/s390/include/uapi/asm/chpid.h
index 581992dfae27..6b4fb29cc197 100644
--- a/arch/s390/include/uapi/asm/chpid.h
+++ b/arch/s390/include/uapi/asm/chpid.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright IBM Corp. 2007 2 * Copyright IBM Corp. 2007, 2012
3 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> 3 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
4 */ 4 */
5 5
@@ -12,10 +12,10 @@
12#define __MAX_CHPID 255 12#define __MAX_CHPID 255
13 13
14struct chp_id { 14struct chp_id {
15 u8 reserved1; 15 __u8 reserved1;
16 u8 cssid; 16 __u8 cssid;
17 u8 reserved2; 17 __u8 reserved2;
18 u8 id; 18 __u8 id;
19} __attribute__((packed)); 19} __attribute__((packed));
20 20
21 21
diff --git a/arch/s390/include/uapi/asm/kvm_para.h b/arch/s390/include/uapi/asm/kvm_para.h
new file mode 100644
index 000000000000..ff1f4e7b3015
--- /dev/null
+++ b/arch/s390/include/uapi/asm/kvm_para.h
@@ -0,0 +1,11 @@
1/*
2 * User API definitions for paravirtual devices on s390
3 *
4 * Copyright IBM Corp. 2008
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
11 */
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
index 8df8d8a19c98..64b24650e4f8 100644
--- a/arch/s390/kernel/cache.c
+++ b/arch/s390/kernel/cache.c
@@ -59,8 +59,8 @@ enum {
59 59
60enum { 60enum {
61 CACHE_TI_UNIFIED = 0, 61 CACHE_TI_UNIFIED = 0,
62 CACHE_TI_INSTRUCTION = 0, 62 CACHE_TI_DATA = 0,
63 CACHE_TI_DATA, 63 CACHE_TI_INSTRUCTION,
64}; 64};
65 65
66struct cache_info { 66struct cache_info {
@@ -121,7 +121,10 @@ static int __init cache_add(int level, int private, int type)
121 cache = kzalloc(sizeof(*cache), GFP_KERNEL); 121 cache = kzalloc(sizeof(*cache), GFP_KERNEL);
122 if (!cache) 122 if (!cache)
123 return -ENOMEM; 123 return -ENOMEM;
124 ti = type == CACHE_TYPE_DATA ? CACHE_TI_DATA : CACHE_TI_UNIFIED; 124 if (type == CACHE_TYPE_INSTRUCTION)
125 ti = CACHE_TI_INSTRUCTION;
126 else
127 ti = CACHE_TI_UNIFIED;
125 cache->size = ecag(EXTRACT_SIZE, level, ti); 128 cache->size = ecag(EXTRACT_SIZE, level, ti);
126 cache->line_size = ecag(EXTRACT_LINE_SIZE, level, ti); 129 cache->line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
127 cache->associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti); 130 cache->associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
diff --git a/arch/s390/kernel/head_kdump.S b/arch/s390/kernel/head_kdump.S
index acaaaf4b7055..085a95eb315f 100644
--- a/arch/s390/kernel/head_kdump.S
+++ b/arch/s390/kernel/head_kdump.S
@@ -85,16 +85,10 @@
85.align 2 85.align 2
86startup_kdump_relocated: 86startup_kdump_relocated:
87 basr %r13,0 87 basr %r13,0
880: 880: lpswe .Lrestart_psw-0b(%r13) # Start new kernel...
89 mvc 0(8,%r0),.Lrestart_psw-0b(%r13) # Setup restart PSW
90 sam31 # Switch to 31 bit addr mode
91 sr %r1,%r1 # Erase register r1
92 sr %r2,%r2 # Erase register r2
93 sigp %r1,%r2,SIGP_SET_ARCHITECTURE # Switch to 31 bit arch mode
94 lpsw 0 # Start new kernel...
95.align 8 89.align 8
96.Lrestart_psw: 90.Lrestart_psw:
97 .long 0x00080000,0x80000000 + startup 91 .quad 0x0000000080000000,0x0000000000000000 + startup
98#else 92#else
99.align 2 93.align 2
100.Lep_startup_kdump: 94.Lep_startup_kdump:
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 9871b1971ed7..c4e7269d4a09 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -94,7 +94,7 @@ static int get_counter_set(u64 event)
94 set = CPUMF_CTR_SET_USER; 94 set = CPUMF_CTR_SET_USER;
95 else if (event < 128) 95 else if (event < 128)
96 set = CPUMF_CTR_SET_CRYPTO; 96 set = CPUMF_CTR_SET_CRYPTO;
97 else if (event < 160) 97 else if (event < 256)
98 set = CPUMF_CTR_SET_EXT; 98 set = CPUMF_CTR_SET_EXT;
99 99
100 return set; 100 return set;
@@ -138,6 +138,10 @@ static int validate_ctr_version(const struct hw_perf_event *hwc)
138 case CPUMF_CTR_SET_EXT: 138 case CPUMF_CTR_SET_EXT:
139 if (cpuhw->info.csvn < 1) 139 if (cpuhw->info.csvn < 1)
140 err = -EOPNOTSUPP; 140 err = -EOPNOTSUPP;
141 if ((cpuhw->info.csvn == 1 && hwc->config > 159) ||
142 (cpuhw->info.csvn == 2 && hwc->config > 175) ||
143 (cpuhw->info.csvn > 2 && hwc->config > 255))
144 err = -EOPNOTSUPP;
141 break; 145 break;
142 } 146 }
143 147
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index de8fa9bbd35e..79cb51adc741 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -8,7 +8,7 @@
8 8
9#ifndef CONFIG_64BIT 9#ifndef CONFIG_64BIT
10OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390") 10OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
11OUTPUT_ARCH(s390) 11OUTPUT_ARCH(s390:31-bit)
12ENTRY(startup) 12ENTRY(startup)
13jiffies = jiffies_64 + 4; 13jiffies = jiffies_64 + 4;
14#else 14#else
diff --git a/arch/tile/Makefile b/arch/tile/Makefile
index 55640cf92597..3d15364c6071 100644
--- a/arch/tile/Makefile
+++ b/arch/tile/Makefile
@@ -26,6 +26,10 @@ $(error Set TILERA_ROOT or CROSS_COMPILE when building $(ARCH) on $(HOST_ARCH))
26 endif 26 endif
27endif 27endif
28 28
29# The tile compiler may emit .eh_frame information for backtracing.
30# In kernel modules, this causes load failures due to unsupported relocations.
31KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
32
29ifneq ($(CONFIG_DEBUG_EXTRA_FLAGS),"") 33ifneq ($(CONFIG_DEBUG_EXTRA_FLAGS),"")
30KBUILD_CFLAGS += $(CONFIG_DEBUG_EXTRA_FLAGS) 34KBUILD_CFLAGS += $(CONFIG_DEBUG_EXTRA_FLAGS)
31endif 35endif
diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c
index 001cbfa10ac6..243ffebe38d6 100644
--- a/arch/tile/kernel/module.c
+++ b/arch/tile/kernel/module.c
@@ -24,16 +24,6 @@
24#include <asm/homecache.h> 24#include <asm/homecache.h>
25#include <arch/opcode.h> 25#include <arch/opcode.h>
26 26
27#ifdef __tilegx__
28# define Elf_Rela Elf64_Rela
29# define ELF_R_SYM ELF64_R_SYM
30# define ELF_R_TYPE ELF64_R_TYPE
31#else
32# define Elf_Rela Elf32_Rela
33# define ELF_R_SYM ELF32_R_SYM
34# define ELF_R_TYPE ELF32_R_TYPE
35#endif
36
37#ifdef MODULE_DEBUG 27#ifdef MODULE_DEBUG
38#define DEBUGP printk 28#define DEBUGP printk
39#else 29#else
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index c9dcc181d4d1..6e8fdf5ad113 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -35,7 +35,7 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
35#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \ 35#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \
36 efi_call_virt(f, a1, a2, a3, a4, a5, a6) 36 efi_call_virt(f, a1, a2, a3, a4, a5, a6)
37 37
38#define efi_ioremap(addr, size, type) ioremap_cache(addr, size) 38#define efi_ioremap(addr, size, type, attr) ioremap_cache(addr, size)
39 39
40#else /* !CONFIG_X86_32 */ 40#else /* !CONFIG_X86_32 */
41 41
@@ -89,7 +89,7 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
89 (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6)) 89 (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
90 90
91extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size, 91extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
92 u32 type); 92 u32 type, u64 attribute);
93 93
94#endif /* CONFIG_X86_32 */ 94#endif /* CONFIG_X86_32 */
95 95
@@ -98,6 +98,8 @@ extern void efi_set_executable(efi_memory_desc_t *md, bool executable);
98extern int efi_memblock_x86_reserve_range(void); 98extern int efi_memblock_x86_reserve_range(void);
99extern void efi_call_phys_prelog(void); 99extern void efi_call_phys_prelog(void);
100extern void efi_call_phys_epilog(void); 100extern void efi_call_phys_epilog(void);
101extern void efi_unmap_memmap(void);
102extern void efi_memory_uc(u64 addr, unsigned long size);
101 103
102#ifndef CONFIG_EFI 104#ifndef CONFIG_EFI
103/* 105/*
diff --git a/arch/x86/include/asm/xen/interface.h b/arch/x86/include/asm/xen/interface.h
index 6d2f75a82a14..54d52ff1304a 100644
--- a/arch/x86/include/asm/xen/interface.h
+++ b/arch/x86/include/asm/xen/interface.h
@@ -51,14 +51,14 @@
51 * with Xen so that on ARM we can have one ABI that works for 32 and 64 51 * with Xen so that on ARM we can have one ABI that works for 32 and 64
52 * bit guests. */ 52 * bit guests. */
53typedef unsigned long xen_pfn_t; 53typedef unsigned long xen_pfn_t;
54#define PRI_xen_pfn "lx"
54typedef unsigned long xen_ulong_t; 55typedef unsigned long xen_ulong_t;
56#define PRI_xen_ulong "lx"
55/* Guest handles for primitive C types. */ 57/* Guest handles for primitive C types. */
56__DEFINE_GUEST_HANDLE(uchar, unsigned char); 58__DEFINE_GUEST_HANDLE(uchar, unsigned char);
57__DEFINE_GUEST_HANDLE(uint, unsigned int); 59__DEFINE_GUEST_HANDLE(uint, unsigned int);
58__DEFINE_GUEST_HANDLE(ulong, unsigned long);
59DEFINE_GUEST_HANDLE(char); 60DEFINE_GUEST_HANDLE(char);
60DEFINE_GUEST_HANDLE(int); 61DEFINE_GUEST_HANDLE(int);
61DEFINE_GUEST_HANDLE(long);
62DEFINE_GUEST_HANDLE(void); 62DEFINE_GUEST_HANDLE(void);
63DEFINE_GUEST_HANDLE(uint64_t); 63DEFINE_GUEST_HANDLE(uint64_t);
64DEFINE_GUEST_HANDLE(uint32_t); 64DEFINE_GUEST_HANDLE(uint32_t);
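
PRI_xen_pfn and PRI_xen_ulong exist so printk format strings stay correct whatever width xen_pfn_t and xen_ulong_t have on a given architecture (plain "lx" on x86, a fixed 64-bit quantity for 32-bit ARM guests). A hedged kernel-side sketch; the function and message are illustrative only.

#include <linux/printk.h>
#include <xen/interface/xen.h>		/* pulls in <asm/xen/interface.h> */

static void xen_log_mapping(xen_pfn_t pfn, xen_ulong_t flags)
{
	/* The PRI_* macros shield callers from the underlying type. */
	pr_debug("mapping pfn %" PRI_xen_pfn " with flags %" PRI_xen_ulong "\n",
		 pfn, flags);
}
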
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index c265593ec2cd..1817fa911024 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2257,6 +2257,9 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
2257 continue; 2257 continue;
2258 2258
2259 cfg = irq_cfg(irq); 2259 cfg = irq_cfg(irq);
2260 if (!cfg)
2261 continue;
2262
2260 raw_spin_lock(&desc->lock); 2263 raw_spin_lock(&desc->lock);
2261 2264
2262 /* 2265 /*
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 3373f84d1397..4a3374e61a93 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -208,12 +208,14 @@ static bool check_hw_exists(void)
208 } 208 }
209 209
210 /* 210 /*
211 * Now write a value and read it back to see if it matches, 211 * Read the current value, change it and read it back to see if it
212 * this is needed to detect certain hardware emulators (qemu/kvm) 212 * matches, this is needed to detect certain hardware emulators
213 * that don't trap on the MSR access and always return 0s. 213 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
214 */ 214 */
215 val = 0xabcdUL;
216 reg = x86_pmu_event_addr(0); 215 reg = x86_pmu_event_addr(0);
216 if (rdmsrl_safe(reg, &val))
217 goto msr_fail;
218 val ^= 0xffffUL;
217 ret = wrmsrl_safe(reg, val); 219 ret = wrmsrl_safe(reg, val);
218 ret |= rdmsrl_safe(reg, &val_new); 220 ret |= rdmsrl_safe(reg, &val_new);
219 if (ret || val != val_new) 221 if (ret || val != val_new)
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 99d96a4978b5..3cf3d97cce3a 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -118,22 +118,24 @@ static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
118{ 118{
119 struct pci_dev *pdev = box->pci_dev; 119 struct pci_dev *pdev = box->pci_dev;
120 int box_ctl = uncore_pci_box_ctl(box); 120 int box_ctl = uncore_pci_box_ctl(box);
121 u32 config; 121 u32 config = 0;
122 122
123 pci_read_config_dword(pdev, box_ctl, &config); 123 if (!pci_read_config_dword(pdev, box_ctl, &config)) {
124 config |= SNBEP_PMON_BOX_CTL_FRZ; 124 config |= SNBEP_PMON_BOX_CTL_FRZ;
125 pci_write_config_dword(pdev, box_ctl, config); 125 pci_write_config_dword(pdev, box_ctl, config);
126 }
126} 127}
127 128
128static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box) 129static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
129{ 130{
130 struct pci_dev *pdev = box->pci_dev; 131 struct pci_dev *pdev = box->pci_dev;
131 int box_ctl = uncore_pci_box_ctl(box); 132 int box_ctl = uncore_pci_box_ctl(box);
132 u32 config; 133 u32 config = 0;
133 134
134 pci_read_config_dword(pdev, box_ctl, &config); 135 if (!pci_read_config_dword(pdev, box_ctl, &config)) {
135 config &= ~SNBEP_PMON_BOX_CTL_FRZ; 136 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
136 pci_write_config_dword(pdev, box_ctl, config); 137 pci_write_config_dword(pdev, box_ctl, config);
138 }
137} 139}
138 140
139static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event) 141static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
@@ -156,7 +158,7 @@ static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct pe
156{ 158{
157 struct pci_dev *pdev = box->pci_dev; 159 struct pci_dev *pdev = box->pci_dev;
158 struct hw_perf_event *hwc = &event->hw; 160 struct hw_perf_event *hwc = &event->hw;
159 u64 count; 161 u64 count = 0;
160 162
161 pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count); 163 pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
162 pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1); 164 pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
@@ -603,11 +605,12 @@ static struct pci_driver snbep_uncore_pci_driver = {
603/* 605/*
604 * build pci bus to socket mapping 606 * build pci bus to socket mapping
605 */ 607 */
606static void snbep_pci2phy_map_init(void) 608static int snbep_pci2phy_map_init(void)
607{ 609{
608 struct pci_dev *ubox_dev = NULL; 610 struct pci_dev *ubox_dev = NULL;
609 int i, bus, nodeid; 611 int i, bus, nodeid;
610 u32 config; 612 int err = 0;
613 u32 config = 0;
611 614
612 while (1) { 615 while (1) {
613 /* find the UBOX device */ 616 /* find the UBOX device */
@@ -618,10 +621,14 @@ static void snbep_pci2phy_map_init(void)
618 break; 621 break;
619 bus = ubox_dev->bus->number; 622 bus = ubox_dev->bus->number;
620 /* get the Node ID of the local register */ 623 /* get the Node ID of the local register */
621 pci_read_config_dword(ubox_dev, 0x40, &config); 624 err = pci_read_config_dword(ubox_dev, 0x40, &config);
625 if (err)
626 break;
622 nodeid = config; 627 nodeid = config;
623 /* get the Node ID mapping */ 628 /* get the Node ID mapping */
624 pci_read_config_dword(ubox_dev, 0x54, &config); 629 err = pci_read_config_dword(ubox_dev, 0x54, &config);
630 if (err)
631 break;
625 /* 632 /*
626 * every three bits in the Node ID mapping register maps 633 * every three bits in the Node ID mapping register maps
627 * to a particular node. 634 * to a particular node.
@@ -633,7 +640,11 @@ static void snbep_pci2phy_map_init(void)
633 } 640 }
634 } 641 }
635 }; 642 };
636 return; 643
644 if (ubox_dev)
645 pci_dev_put(ubox_dev);
646
647 return err ? pcibios_err_to_errno(err) : 0;
637} 648}
638/* end of Sandy Bridge-EP uncore support */ 649/* end of Sandy Bridge-EP uncore support */
639 650
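The Node ID decode above packs one 3-bit physical node number per slot of the 0x54 mapping register and matches it against the local node ID read from offset 0x40. A stand-alone C model of that lookup, with assumed register values rather than real hardware reads:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t mapping = 0x00fac688;	/* assumed contents of the 0x54 register */
		unsigned int nodeid = 3;	/* assumed local node ID from offset 0x40 */
		int i;

		/* every three bits of the mapping name one node; find our slot */
		for (i = 0; i < 8; i++) {
			if (((mapping >> (i * 3)) & 0x7) == nodeid) {
				printf("node %u is physical id %d\n", nodeid, i);
				break;
			}
		}
		return 0;
	}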
@@ -1547,7 +1558,6 @@ void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
1547{ 1558{
1548 struct hw_perf_event *hwc = &event->hw; 1559 struct hw_perf_event *hwc = &event->hw;
1549 struct hw_perf_event_extra *reg1 = &hwc->extra_reg; 1560 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1550 int port;
1551 1561
1552 /* adjust the main event selector and extra register index */ 1562 /* adjust the main event selector and extra register index */
1553 if (reg1->idx % 2) { 1563 if (reg1->idx % 2) {
@@ -1559,7 +1569,6 @@ void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
1559 } 1569 }
1560 1570
1561 /* adjust extra register config */ 1571 /* adjust extra register config */
1562 port = reg1->idx / 6 + box->pmu->pmu_idx * 4;
1563 switch (reg1->idx % 6) { 1572 switch (reg1->idx % 6) {
1564 case 2: 1573 case 2:
1565 /* shift the 8~15 bits to the 0~7 bits */ 1574 /* shift the 8~15 bits to the 0~7 bits */
@@ -2578,9 +2587,11 @@ static int __init uncore_pci_init(void)
2578 2587
2579 switch (boot_cpu_data.x86_model) { 2588 switch (boot_cpu_data.x86_model) {
2580 case 45: /* Sandy Bridge-EP */ 2589 case 45: /* Sandy Bridge-EP */
2590 ret = snbep_pci2phy_map_init();
2591 if (ret)
2592 return ret;
2581 pci_uncores = snbep_pci_uncores; 2593 pci_uncores = snbep_pci_uncores;
2582 uncore_pci_driver = &snbep_uncore_pci_driver; 2594 uncore_pci_driver = &snbep_uncore_pci_driver;
2583 snbep_pci2phy_map_init();
2584 break; 2595 break;
2585 default: 2596 default:
2586 return 0; 2597 return 0;
@@ -2926,6 +2937,9 @@ static int __init intel_uncore_init(void)
2926 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) 2937 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
2927 return -ENODEV; 2938 return -ENODEV;
2928 2939
2940 if (cpu_has_hypervisor)
2941 return -ENODEV;
2942
2929 ret = uncore_pci_init(); 2943 ret = uncore_pci_init();
2930 if (ret) 2944 if (ret)
2931 goto fail; 2945 goto fail;
diff --git a/arch/x86/kernel/cpu/perf_event_knc.c b/arch/x86/kernel/cpu/perf_event_knc.c
index 7c46bfdbc373..4b7731bf23a8 100644
--- a/arch/x86/kernel/cpu/perf_event_knc.c
+++ b/arch/x86/kernel/cpu/perf_event_knc.c
@@ -3,6 +3,8 @@
3#include <linux/perf_event.h> 3#include <linux/perf_event.h>
4#include <linux/types.h> 4#include <linux/types.h>
5 5
6#include <asm/hardirq.h>
7
6#include "perf_event.h" 8#include "perf_event.h"
7 9
8static const u64 knc_perfmon_event_map[] = 10static const u64 knc_perfmon_event_map[] =
@@ -173,30 +175,100 @@ static void knc_pmu_enable_all(int added)
173static inline void 175static inline void
174knc_pmu_disable_event(struct perf_event *event) 176knc_pmu_disable_event(struct perf_event *event)
175{ 177{
176 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
177 struct hw_perf_event *hwc = &event->hw; 178 struct hw_perf_event *hwc = &event->hw;
178 u64 val; 179 u64 val;
179 180
180 val = hwc->config; 181 val = hwc->config;
181 if (cpuc->enabled) 182 val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
182 val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
183 183
184 (void)wrmsrl_safe(hwc->config_base + hwc->idx, val); 184 (void)wrmsrl_safe(hwc->config_base + hwc->idx, val);
185} 185}
186 186
187static void knc_pmu_enable_event(struct perf_event *event) 187static void knc_pmu_enable_event(struct perf_event *event)
188{ 188{
189 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
190 struct hw_perf_event *hwc = &event->hw; 189 struct hw_perf_event *hwc = &event->hw;
191 u64 val; 190 u64 val;
192 191
193 val = hwc->config; 192 val = hwc->config;
194 if (cpuc->enabled) 193 val |= ARCH_PERFMON_EVENTSEL_ENABLE;
195 val |= ARCH_PERFMON_EVENTSEL_ENABLE;
196 194
197 (void)wrmsrl_safe(hwc->config_base + hwc->idx, val); 195 (void)wrmsrl_safe(hwc->config_base + hwc->idx, val);
198} 196}
199 197
198static inline u64 knc_pmu_get_status(void)
199{
200 u64 status;
201
202 rdmsrl(MSR_KNC_IA32_PERF_GLOBAL_STATUS, status);
203
204 return status;
205}
206
207static inline void knc_pmu_ack_status(u64 ack)
208{
209 wrmsrl(MSR_KNC_IA32_PERF_GLOBAL_OVF_CONTROL, ack);
210}
211
212static int knc_pmu_handle_irq(struct pt_regs *regs)
213{
214 struct perf_sample_data data;
215 struct cpu_hw_events *cpuc;
216 int handled = 0;
217 int bit, loops;
218 u64 status;
219
220 cpuc = &__get_cpu_var(cpu_hw_events);
221
222 knc_pmu_disable_all();
223
224 status = knc_pmu_get_status();
225 if (!status) {
226 knc_pmu_enable_all(0);
227 return handled;
228 }
229
230 loops = 0;
231again:
232 knc_pmu_ack_status(status);
233 if (++loops > 100) {
234 WARN_ONCE(1, "perf: irq loop stuck!\n");
235 perf_event_print_debug();
236 goto done;
237 }
238
239 inc_irq_stat(apic_perf_irqs);
240
241 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
242 struct perf_event *event = cpuc->events[bit];
243
244 handled++;
245
246 if (!test_bit(bit, cpuc->active_mask))
247 continue;
248
249 if (!intel_pmu_save_and_restart(event))
250 continue;
251
252 perf_sample_data_init(&data, 0, event->hw.last_period);
253
254 if (perf_event_overflow(event, &data, regs))
255 x86_pmu_stop(event, 0);
256 }
257
258 /*
259 * Repeat if there is more work to be done:
260 */
261 status = knc_pmu_get_status();
262 if (status)
263 goto again;
264
265done:
266 knc_pmu_enable_all(0);
267
268 return handled;
269}
270
271
200PMU_FORMAT_ATTR(event, "config:0-7" ); 272PMU_FORMAT_ATTR(event, "config:0-7" );
201PMU_FORMAT_ATTR(umask, "config:8-15" ); 273PMU_FORMAT_ATTR(umask, "config:8-15" );
202PMU_FORMAT_ATTR(edge, "config:18" ); 274PMU_FORMAT_ATTR(edge, "config:18" );
@@ -214,7 +286,7 @@ static struct attribute *intel_knc_formats_attr[] = {
214 286
215static __initconst struct x86_pmu knc_pmu = { 287static __initconst struct x86_pmu knc_pmu = {
216 .name = "knc", 288 .name = "knc",
217 .handle_irq = x86_pmu_handle_irq, 289 .handle_irq = knc_pmu_handle_irq,
218 .disable_all = knc_pmu_disable_all, 290 .disable_all = knc_pmu_disable_all,
219 .enable_all = knc_pmu_enable_all, 291 .enable_all = knc_pmu_enable_all,
220 .enable = knc_pmu_enable_event, 292 .enable = knc_pmu_enable_event,
@@ -226,12 +298,11 @@ static __initconst struct x86_pmu knc_pmu = {
226 .event_map = knc_pmu_event_map, 298 .event_map = knc_pmu_event_map,
227 .max_events = ARRAY_SIZE(knc_perfmon_event_map), 299 .max_events = ARRAY_SIZE(knc_perfmon_event_map),
228 .apic = 1, 300 .apic = 1,
229 .max_period = (1ULL << 31) - 1, 301 .max_period = (1ULL << 39) - 1,
230 .version = 0, 302 .version = 0,
231 .num_counters = 2, 303 .num_counters = 2,
232 /* in theory 40 bits, early silicon is buggy though */ 304 .cntval_bits = 40,
233 .cntval_bits = 32, 305 .cntval_mask = (1ULL << 40) - 1,
234 .cntval_mask = (1ULL << 32) - 1,
235 .get_event_constraints = x86_get_event_constraints, 306 .get_event_constraints = x86_get_event_constraints,
236 .event_constraints = knc_event_constraints, 307 .event_constraints = knc_event_constraints,
237 .format_attrs = intel_knc_formats_attr, 308 .format_attrs = intel_knc_formats_attr,
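The widened counter fields above follow the usual x86 perf convention: the counter is preloaded with -period masked to its width, so the largest usable period is half the 40-bit range. A quick stand-alone model of that arithmetic (an illustration, not driver code):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t cntval_mask = (1ULL << 40) - 1;
		uint64_t max_period  = (1ULL << 39) - 1;
		uint64_t preload     = (-max_period) & cntval_mask;	/* counter start value */

		printf("preload %#llx, overflows after %llu events\n",
		       (unsigned long long)preload, (unsigned long long)max_period);
		return 0;
	}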
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index e4dd0f7a0453..7d0270bd793e 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -8,13 +8,106 @@
8 */ 8 */
9static const u64 p6_perfmon_event_map[] = 9static const u64 p6_perfmon_event_map[] =
10{ 10{
11 [PERF_COUNT_HW_CPU_CYCLES] = 0x0079, 11 [PERF_COUNT_HW_CPU_CYCLES] = 0x0079, /* CPU_CLK_UNHALTED */
12 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, 12 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, /* INST_RETIRED */
13 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0f2e, 13 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0f2e, /* L2_RQSTS:M:E:S:I */
14 [PERF_COUNT_HW_CACHE_MISSES] = 0x012e, 14 [PERF_COUNT_HW_CACHE_MISSES] = 0x012e, /* L2_RQSTS:I */
15 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4, 15 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4, /* BR_INST_RETIRED */
16 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, 16 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, /* BR_MISS_PRED_RETIRED */
17 [PERF_COUNT_HW_BUS_CYCLES] = 0x0062, 17 [PERF_COUNT_HW_BUS_CYCLES] = 0x0062, /* BUS_DRDY_CLOCKS */
18 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00a2, /* RESOURCE_STALLS */
19
20};
21
22static __initconst u64 p6_hw_cache_event_ids
23 [PERF_COUNT_HW_CACHE_MAX]
24 [PERF_COUNT_HW_CACHE_OP_MAX]
25 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
26{
27 [ C(L1D) ] = {
28 [ C(OP_READ) ] = {
29 [ C(RESULT_ACCESS) ] = 0x0043, /* DATA_MEM_REFS */
30 [ C(RESULT_MISS) ] = 0x0045, /* DCU_LINES_IN */
31 },
32 [ C(OP_WRITE) ] = {
33 [ C(RESULT_ACCESS) ] = 0,
34 [ C(RESULT_MISS) ] = 0x0f29, /* L2_LD:M:E:S:I */
35 },
36 [ C(OP_PREFETCH) ] = {
37 [ C(RESULT_ACCESS) ] = 0,
38 [ C(RESULT_MISS) ] = 0,
39 },
40 },
41 [ C(L1I ) ] = {
42 [ C(OP_READ) ] = {
43 [ C(RESULT_ACCESS) ] = 0x0080, /* IFU_IFETCH */
44 [ C(RESULT_MISS) ] = 0x0f28, /* L2_IFETCH:M:E:S:I */
45 },
46 [ C(OP_WRITE) ] = {
47 [ C(RESULT_ACCESS) ] = -1,
48 [ C(RESULT_MISS) ] = -1,
49 },
50 [ C(OP_PREFETCH) ] = {
51 [ C(RESULT_ACCESS) ] = 0,
52 [ C(RESULT_MISS) ] = 0,
53 },
54 },
55 [ C(LL ) ] = {
56 [ C(OP_READ) ] = {
57 [ C(RESULT_ACCESS) ] = 0,
58 [ C(RESULT_MISS) ] = 0,
59 },
60 [ C(OP_WRITE) ] = {
61 [ C(RESULT_ACCESS) ] = 0,
62 [ C(RESULT_MISS) ] = 0x0025, /* L2_M_LINES_INM */
63 },
64 [ C(OP_PREFETCH) ] = {
65 [ C(RESULT_ACCESS) ] = 0,
66 [ C(RESULT_MISS) ] = 0,
67 },
68 },
69 [ C(DTLB) ] = {
70 [ C(OP_READ) ] = {
71 [ C(RESULT_ACCESS) ] = 0x0043, /* DATA_MEM_REFS */
72 [ C(RESULT_MISS) ] = 0,
73 },
74 [ C(OP_WRITE) ] = {
75 [ C(RESULT_ACCESS) ] = 0,
76 [ C(RESULT_MISS) ] = 0,
77 },
78 [ C(OP_PREFETCH) ] = {
79 [ C(RESULT_ACCESS) ] = 0,
80 [ C(RESULT_MISS) ] = 0,
81 },
82 },
83 [ C(ITLB) ] = {
84 [ C(OP_READ) ] = {
85 [ C(RESULT_ACCESS) ] = 0x0080, /* IFU_IFETCH */
86 [ C(RESULT_MISS) ] = 0x0085, /* ITLB_MISS */
87 },
88 [ C(OP_WRITE) ] = {
89 [ C(RESULT_ACCESS) ] = -1,
90 [ C(RESULT_MISS) ] = -1,
91 },
92 [ C(OP_PREFETCH) ] = {
93 [ C(RESULT_ACCESS) ] = -1,
94 [ C(RESULT_MISS) ] = -1,
95 },
96 },
97 [ C(BPU ) ] = {
98 [ C(OP_READ) ] = {
99 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED */
100 [ C(RESULT_MISS) ] = 0x00c5, /* BR_MISS_PRED_RETIRED */
101 },
102 [ C(OP_WRITE) ] = {
103 [ C(RESULT_ACCESS) ] = -1,
104 [ C(RESULT_MISS) ] = -1,
105 },
106 [ C(OP_PREFETCH) ] = {
107 [ C(RESULT_ACCESS) ] = -1,
108 [ C(RESULT_MISS) ] = -1,
109 },
110 },
18}; 111};
19 112
20static u64 p6_pmu_event_map(int hw_event) 113static u64 p6_pmu_event_map(int hw_event)
@@ -34,7 +127,7 @@ static struct event_constraint p6_event_constraints[] =
34{ 127{
35 INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FLOPS */ 128 INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FLOPS */
36 INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */ 129 INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
37 INTEL_EVENT_CONSTRAINT(0x11, 0x1), /* FP_ASSIST */ 130 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
38 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ 131 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
39 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */ 132 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
40 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */ 133 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
@@ -64,25 +157,25 @@ static void p6_pmu_enable_all(int added)
64static inline void 157static inline void
65p6_pmu_disable_event(struct perf_event *event) 158p6_pmu_disable_event(struct perf_event *event)
66{ 159{
67 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
68 struct hw_perf_event *hwc = &event->hw; 160 struct hw_perf_event *hwc = &event->hw;
69 u64 val = P6_NOP_EVENT; 161 u64 val = P6_NOP_EVENT;
70 162
71 if (cpuc->enabled)
72 val |= ARCH_PERFMON_EVENTSEL_ENABLE;
73
74 (void)wrmsrl_safe(hwc->config_base, val); 163 (void)wrmsrl_safe(hwc->config_base, val);
75} 164}
76 165
77static void p6_pmu_enable_event(struct perf_event *event) 166static void p6_pmu_enable_event(struct perf_event *event)
78{ 167{
79 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
80 struct hw_perf_event *hwc = &event->hw; 168 struct hw_perf_event *hwc = &event->hw;
81 u64 val; 169 u64 val;
82 170
83 val = hwc->config; 171 val = hwc->config;
84 if (cpuc->enabled) 172
85 val |= ARCH_PERFMON_EVENTSEL_ENABLE; 173 /*
174 * p6 only has a global event enable, set on PerfEvtSel0
175 * We "disable" events by programming P6_NOP_EVENT
176 * and we rely on p6_pmu_enable_all() being called
177 * to actually enable the events.
178 */
86 179
87 (void)wrmsrl_safe(hwc->config_base, val); 180 (void)wrmsrl_safe(hwc->config_base, val);
88} 181}
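The comment above describes the P6 scheme: one global enable bit on PerfEvtSel0 gates all counters, so an individual event is parked on a NOP event code rather than having its enable bit cleared. A minimal stand-alone model of that idea; the register values and constants below are assumed for illustration:

	#include <stdio.h>
	#include <stdint.h>

	#define EVTSEL_ENABLE	(1u << 22)	/* assumed ARCH_PERFMON_EVENTSEL_ENABLE bit */
	#define NOP_EVENT	0x0000002eu	/* assumed NOP event code */

	int main(void)
	{
		uint32_t evtsel[2] = { 0x004300c0, 0x00430079 };	/* assumed configs */

		evtsel[0] |= EVTSEL_ENABLE;	/* global gate on EvtSel0: counting on */
		evtsel[1] = NOP_EVENT;		/* counter 1 "disabled" by counting a NOP */

		printf("global gate %s, ctr1 event code %#x\n",
		       (evtsel[0] & EVTSEL_ENABLE) ? "on" : "off", evtsel[1] & 0xffu);
		return 0;
	}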
@@ -158,5 +251,9 @@ __init int p6_pmu_init(void)
158 251
159 x86_pmu = p6_pmu; 252 x86_pmu = p6_pmu;
160 253
254 memcpy(hw_cache_event_ids, p6_hw_cache_event_ids,
255 sizeof(hw_cache_event_ids));
256
257
161 return 0; 258 return 0;
162} 259}
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index ed858e9e9a74..df06ade26bef 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1077,6 +1077,9 @@ void __init memblock_x86_fill(void)
1077 memblock_add(ei->addr, ei->size); 1077 memblock_add(ei->addr, ei->size);
1078 } 1078 }
1079 1079
1080 /* throw away partial pages */
1081 memblock_trim_memory(PAGE_SIZE);
1082
1080 memblock_dump_all(); 1083 memblock_dump_all();
1081} 1084}
1082 1085
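memblock_trim_memory(PAGE_SIZE) above discards partial pages at the edges of each registered region. A stand-alone sketch of the same rounding, with made-up addresses:

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SIZE 4096ULL

	int main(void)
	{
		uint64_t start = 0x1234, end = 0x9000;	/* assumed e820 range */
		uint64_t s = (start + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);	/* round start up */
		uint64_t e = end & ~(PAGE_SIZE - 1);				/* round end down */

		if (s < e)
			printf("usable: [%#llx-%#llx)\n",
			       (unsigned long long)s, (unsigned long long)e);
		else
			printf("range dropped: no whole page left\n");
		return 0;
	}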
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index a1193aef6d7d..88b725aa1d52 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1035,7 +1035,7 @@ ENTRY(xen_sysenter_target)
1035 1035
1036ENTRY(xen_hypervisor_callback) 1036ENTRY(xen_hypervisor_callback)
1037 CFI_STARTPROC 1037 CFI_STARTPROC
1038 pushl_cfi $0 1038 pushl_cfi $-1 /* orig_ax = -1 => not a system call */
1039 SAVE_ALL 1039 SAVE_ALL
1040 TRACE_IRQS_OFF 1040 TRACE_IRQS_OFF
1041 1041
@@ -1077,14 +1077,16 @@ ENTRY(xen_failsafe_callback)
10772: mov 8(%esp),%es 10772: mov 8(%esp),%es
10783: mov 12(%esp),%fs 10783: mov 12(%esp),%fs
10794: mov 16(%esp),%gs 10794: mov 16(%esp),%gs
1080 /* EAX == 0 => Category 1 (Bad segment)
1081 EAX != 0 => Category 2 (Bad IRET) */
1080 testl %eax,%eax 1082 testl %eax,%eax
1081 popl_cfi %eax 1083 popl_cfi %eax
1082 lea 16(%esp),%esp 1084 lea 16(%esp),%esp
1083 CFI_ADJUST_CFA_OFFSET -16 1085 CFI_ADJUST_CFA_OFFSET -16
1084 jz 5f 1086 jz 5f
1085 addl $16,%esp 1087 addl $16,%esp
1086 jmp iret_exc # EAX != 0 => Category 2 (Bad IRET) 1088 jmp iret_exc
10875: pushl_cfi $0 # EAX == 0 => Category 1 (Bad segment) 10895: pushl_cfi $-1 /* orig_ax = -1 => not a system call */
1088 SAVE_ALL 1090 SAVE_ALL
1089 jmp ret_from_exception 1091 jmp ret_from_exception
1090 CFI_ENDPROC 1092 CFI_ENDPROC
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 0c58952d64e8..b51b2c7ee51f 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1435,7 +1435,7 @@ ENTRY(xen_failsafe_callback)
1435 CFI_RESTORE r11 1435 CFI_RESTORE r11
1436 addq $0x30,%rsp 1436 addq $0x30,%rsp
1437 CFI_ADJUST_CFA_OFFSET -0x30 1437 CFI_ADJUST_CFA_OFFSET -0x30
1438 pushq_cfi $0 1438 pushq_cfi $-1 /* orig_ax = -1 => not a system call */
1439 SAVE_ALL 1439 SAVE_ALL
1440 jmp error_exit 1440 jmp error_exit
1441 CFI_ENDPROC 1441 CFI_ENDPROC
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index b3e5e51bc907..4180a874c764 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -247,7 +247,10 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
247 break; 247 break;
248 case KVM_PV_REASON_PAGE_NOT_PRESENT: 248 case KVM_PV_REASON_PAGE_NOT_PRESENT:
249 /* page is swapped out by the host. */ 249 /* page is swapped out by the host. */
250 rcu_irq_enter();
251 exit_idle();
250 kvm_async_pf_task_wait((u32)read_cr2()); 252 kvm_async_pf_task_wait((u32)read_cr2());
253 rcu_irq_exit();
251 break; 254 break;
252 case KVM_PV_REASON_PAGE_READY: 255 case KVM_PV_REASON_PAGE_READY:
253 rcu_irq_enter(); 256 rcu_irq_enter();
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 468e98dfd44e..ca45696f30fb 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -921,18 +921,19 @@ void __init setup_arch(char **cmdline_p)
921#ifdef CONFIG_X86_64 921#ifdef CONFIG_X86_64
922 if (max_pfn > max_low_pfn) { 922 if (max_pfn > max_low_pfn) {
923 int i; 923 int i;
924 for (i = 0; i < e820.nr_map; i++) { 924 unsigned long start, end;
925 struct e820entry *ei = &e820.map[i]; 925 unsigned long start_pfn, end_pfn;
926 926
927 if (ei->addr + ei->size <= 1UL << 32) 927 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn,
928 continue; 928 NULL) {
929 929
930 if (ei->type == E820_RESERVED) 930 end = PFN_PHYS(end_pfn);
931 if (end <= (1UL<<32))
931 continue; 932 continue;
932 933
934 start = PFN_PHYS(start_pfn);
933 max_pfn_mapped = init_memory_mapping( 935 max_pfn_mapped = init_memory_mapping(
934 ei->addr < 1UL << 32 ? 1UL << 32 : ei->addr, 936 max((1UL<<32), start), end);
935 ei->addr + ei->size);
936 } 937 }
937 938
938 /* can we preserve max_low_pfn ?*/ 939 /* can we preserve max_low_pfn ?*/

@@ -1048,6 +1049,18 @@ void __init setup_arch(char **cmdline_p)
1048 arch_init_ideal_nops(); 1049 arch_init_ideal_nops();
1049 1050
1050 register_refined_jiffies(CLOCK_TICK_RATE); 1051 register_refined_jiffies(CLOCK_TICK_RATE);
1052
1053#ifdef CONFIG_EFI
1054 /* Once setup is done above, disable efi_enabled on mismatched
1055 * firmware/kernel architectures since there is no support for
1056 * runtime services.
1057 */
1058 if (efi_enabled && IS_ENABLED(CONFIG_X86_64) != efi_64bit) {
1059 pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
1060 efi_unmap_memmap();
1061 efi_enabled = 0;
1062 }
1063#endif
1051} 1064}
1052 1065
1053#ifdef CONFIG_X86_32 1066#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 29ad351804e9..70b27ee6118e 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -824,10 +824,8 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
824 mce_notify_process(); 824 mce_notify_process();
825#endif /* CONFIG_X86_64 && CONFIG_X86_MCE */ 825#endif /* CONFIG_X86_64 && CONFIG_X86_MCE */
826 826
827 if (thread_info_flags & _TIF_UPROBE) { 827 if (thread_info_flags & _TIF_UPROBE)
828 clear_thread_flag(TIF_UPROBE);
829 uprobe_notify_resume(regs); 828 uprobe_notify_resume(regs);
830 }
831 829
832 /* deal with pending signal delivery */ 830 /* deal with pending signal delivery */
833 if (thread_info_flags & _TIF_SIGPENDING) 831 if (thread_info_flags & _TIF_SIGPENDING)
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 9538f00827a9..aafa5557b396 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -651,31 +651,19 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
651 651
652/* 652/*
653 * Skip these instructions as per the currently known x86 ISA. 653 * Skip these instructions as per the currently known x86 ISA.
654 * 0x66* { 0x90 | 0x0f 0x1f | 0x0f 0x19 | 0x87 0xc0 } 654 * rep=0x66*; nop=0x90
655 */ 655 */
656static bool __skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) 656static bool __skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
657{ 657{
658 int i; 658 int i;
659 659
660 for (i = 0; i < MAX_UINSN_BYTES; i++) { 660 for (i = 0; i < MAX_UINSN_BYTES; i++) {
661 if ((auprobe->insn[i] == 0x66)) 661 if (auprobe->insn[i] == 0x66)
662 continue; 662 continue;
663 663
664 if (auprobe->insn[i] == 0x90) 664 if (auprobe->insn[i] == 0x90)
665 return true; 665 return true;
666 666
667 if (i == (MAX_UINSN_BYTES - 1))
668 break;
669
670 if ((auprobe->insn[i] == 0x0f) && (auprobe->insn[i+1] == 0x1f))
671 return true;
672
673 if ((auprobe->insn[i] == 0x0f) && (auprobe->insn[i+1] == 0x19))
674 return true;
675
676 if ((auprobe->insn[i] == 0x87) && (auprobe->insn[i+1] == 0xc0))
677 return true;
678
679 break; 667 break;
680 } 668 }
681 return false; 669 return false;
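After the simplification above, only 0x66-prefixed 0x90 NOPs are skipped at single-step time; the multi-byte 0x0f 0x1f forms are no longer special-cased. A small stand-alone check mirroring that rule (MAX_UINSN_BYTES assumed to be 16):

	#include <stdio.h>
	#include <stdbool.h>
	#include <stdint.h>

	#define MAX_UINSN_BYTES 16

	static bool is_prefixed_nop(const uint8_t *insn)
	{
		int i;

		for (i = 0; i < MAX_UINSN_BYTES; i++) {
			if (insn[i] == 0x66)	/* operand-size prefixes are ignored */
				continue;
			return insn[i] == 0x90;
		}
		return false;
	}

	int main(void)
	{
		const uint8_t nop[MAX_UINSN_BYTES]   = { 0x66, 0x66, 0x90 };
		const uint8_t other[MAX_UINSN_BYTES] = { 0x0f, 0x1f, 0x00 };

		printf("%d %d\n", is_prefixed_nop(nop), is_prefixed_nop(other));
		return 0;
	}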
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index c6e6b721b6ee..43e9fadca5d0 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1311,7 +1311,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
1311 vcpu->arch.apic_base = value; 1311 vcpu->arch.apic_base = value;
1312 if (apic_x2apic_mode(apic)) { 1312 if (apic_x2apic_mode(apic)) {
1313 u32 id = kvm_apic_id(apic); 1313 u32 id = kvm_apic_id(apic);
1314 u32 ldr = ((id & ~0xf) << 16) | (1 << (id & 0xf)); 1314 u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf));
1315 kvm_apic_set_ldr(apic, ldr); 1315 kvm_apic_set_ldr(apic, ldr);
1316 } 1316 }
1317 apic->base_address = apic->vcpu->arch.apic_base & 1317 apic->base_address = apic->vcpu->arch.apic_base &
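The corrected LDR derivation above places the cluster (APIC ID >> 4) in bits 31:16 and sets one bit of the 16-bit logical mask selected by the low nibble. A worked example with an assumed ID:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t id = 0x26;	/* assumed x2APIC ID */
		uint32_t ldr = ((id >> 4) << 16) | (1u << (id & 0xf));

		printf("id %#x -> ldr %#010x\n", id, ldr);	/* prints 0x00020040 */
		return 0;
	}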
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d289fee1ffb8..6f85fe0bf958 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2497,8 +2497,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2497 } 2497 }
2498 } 2498 }
2499 2499
2500 if (!is_error_pfn(pfn)) 2500 kvm_release_pfn_clean(pfn);
2501 kvm_release_pfn_clean(pfn);
2502} 2501}
2503 2502
2504static void nonpaging_new_cr3(struct kvm_vcpu *vcpu) 2503static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index ab1f6a93b527..d7aea41563b3 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -35,40 +35,44 @@ struct map_range {
35 unsigned page_size_mask; 35 unsigned page_size_mask;
36}; 36};
37 37
38static void __init find_early_table_space(struct map_range *mr, unsigned long end, 38/*
39 int use_pse, int use_gbpages) 39 * First calculate space needed for kernel direct mapping page tables to cover
40 * mr[0].start to mr[nr_range - 1].end, while accounting for possible 2M and 1GB
41 * pages. Then find enough contiguous space for those page tables.
42 */
43static void __init find_early_table_space(struct map_range *mr, int nr_range)
40{ 44{
41 unsigned long puds, pmds, ptes, tables, start = 0, good_end = end; 45 int i;
46 unsigned long puds = 0, pmds = 0, ptes = 0, tables;
47 unsigned long start = 0, good_end;
42 phys_addr_t base; 48 phys_addr_t base;
43 49
44 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; 50 for (i = 0; i < nr_range; i++) {
45 tables = roundup(puds * sizeof(pud_t), PAGE_SIZE); 51 unsigned long range, extra;
46
47 if (use_gbpages) {
48 unsigned long extra;
49
50 extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
51 pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
52 } else
53 pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
54 52
55 tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE); 53 range = mr[i].end - mr[i].start;
54 puds += (range + PUD_SIZE - 1) >> PUD_SHIFT;
56 55
57 if (use_pse) { 56 if (mr[i].page_size_mask & (1 << PG_LEVEL_1G)) {
58 unsigned long extra; 57 extra = range - ((range >> PUD_SHIFT) << PUD_SHIFT);
58 pmds += (extra + PMD_SIZE - 1) >> PMD_SHIFT;
59 } else {
60 pmds += (range + PMD_SIZE - 1) >> PMD_SHIFT;
61 }
59 62
60 extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT); 63 if (mr[i].page_size_mask & (1 << PG_LEVEL_2M)) {
64 extra = range - ((range >> PMD_SHIFT) << PMD_SHIFT);
61#ifdef CONFIG_X86_32 65#ifdef CONFIG_X86_32
62 extra += PMD_SIZE; 66 extra += PMD_SIZE;
63#endif 67#endif
64 /* The first 2/4M doesn't use large pages. */ 68 ptes += (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
65 if (mr->start < PMD_SIZE) 69 } else {
66 extra += mr->end - mr->start; 70 ptes += (range + PAGE_SIZE - 1) >> PAGE_SHIFT;
67 71 }
68 ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT; 72 }
69 } else
70 ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
71 73
74 tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
75 tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
72 tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE); 76 tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
73 77
74#ifdef CONFIG_X86_32 78#ifdef CONFIG_X86_32
@@ -86,7 +90,7 @@ static void __init find_early_table_space(struct map_range *mr, unsigned long en
86 pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT); 90 pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
87 91
88 printk(KERN_DEBUG "kernel direct mapping tables up to %#lx @ [mem %#010lx-%#010lx]\n", 92 printk(KERN_DEBUG "kernel direct mapping tables up to %#lx @ [mem %#010lx-%#010lx]\n",
89 end - 1, pgt_buf_start << PAGE_SHIFT, 93 mr[nr_range - 1].end - 1, pgt_buf_start << PAGE_SHIFT,
90 (pgt_buf_top << PAGE_SHIFT) - 1); 94 (pgt_buf_top << PAGE_SHIFT) - 1);
91} 95}
92 96
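The sizing logic above walks each map_range and counts the PUD, PMD and PTE entries it needs, depending on which large-page sizes that range may use. A rough stand-alone model for a single range, assuming 2M pages are usable but 1G pages are not (values made up):

	#include <stdio.h>

	#define PUD_SHIFT 30
	#define PMD_SHIFT 21
	#define PAGE_SHIFT 12
	#define PUD_SIZE (1UL << PUD_SHIFT)
	#define PMD_SIZE (1UL << PMD_SHIFT)
	#define PAGE_SIZE (1UL << PAGE_SHIFT)

	int main(void)
	{
		unsigned long range = (1UL << 30) + (5UL << 20);	/* 1 GiB + 5 MiB */
		unsigned long puds, pmds, extra, ptes;

		puds = (range + PUD_SIZE - 1) >> PUD_SHIFT;
		pmds = (range + PMD_SIZE - 1) >> PMD_SHIFT;	/* no 1G pages: one PMD per 2M */
		extra = range - ((range >> PMD_SHIFT) << PMD_SHIFT);
		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;	/* only the tail below 2M needs PTEs */

		printf("puds=%lu pmds=%lu ptes=%lu\n", puds, pmds, ptes);
		return 0;
	}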
@@ -267,7 +271,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
267 * nodes are discovered. 271 * nodes are discovered.
268 */ 272 */
269 if (!after_bootmem) 273 if (!after_bootmem)
270 find_early_table_space(&mr[0], end, use_pse, use_gbpages); 274 find_early_table_space(mr, nr_range);
271 275
272 for (i = 0; i < nr_range; i++) 276 for (i = 0; i < nr_range; i++)
273 ret = kernel_physical_mapping_init(mr[i].start, mr[i].end, 277 ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 2b6b4a3c8beb..3baff255adac 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -386,7 +386,8 @@ phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
386 * these mappings are more intelligent. 386 * these mappings are more intelligent.
387 */ 387 */
388 if (pte_val(*pte)) { 388 if (pte_val(*pte)) {
389 pages++; 389 if (!after_bootmem)
390 pages++;
390 continue; 391 continue;
391 } 392 }
392 393
@@ -451,6 +452,8 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
451 * attributes. 452 * attributes.
452 */ 453 */
453 if (page_size_mask & (1 << PG_LEVEL_2M)) { 454 if (page_size_mask & (1 << PG_LEVEL_2M)) {
455 if (!after_bootmem)
456 pages++;
454 last_map_addr = next; 457 last_map_addr = next;
455 continue; 458 continue;
456 } 459 }
@@ -526,6 +529,8 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
526 * attributes. 529 * attributes.
527 */ 530 */
528 if (page_size_mask & (1 << PG_LEVEL_1G)) { 531 if (page_size_mask & (1 << PG_LEVEL_1G)) {
532 if (!after_bootmem)
533 pages++;
529 last_map_addr = next; 534 last_map_addr = next;
530 continue; 535 continue;
531 } 536 }
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 26b8a8514ee5..48768df2471a 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -55,7 +55,7 @@ u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
55 val |= counter_config->extra; 55 val |= counter_config->extra;
56 event &= model->event_mask ? model->event_mask : 0xFF; 56 event &= model->event_mask ? model->event_mask : 0xFF;
57 val |= event & 0xFF; 57 val |= event & 0xFF;
58 val |= (event & 0x0F00) << 24; 58 val |= (u64)(event & 0x0F00) << 24;
59 59
60 return val; 60 return val;
61} 61}
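The (u64) cast added above matters because bits 11:8 of the event select belong in bits 35:32 of the control value; shifting in 32 bits silently drops them. A stand-alone demonstration with an assumed event code:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t event = 0x01d3;	/* assumed 12-bit event code */
		uint64_t bad  = (event & 0x0F00) << 24;			/* 32-bit shift: bits lost */
		uint64_t good = (uint64_t)(event & 0x0F00) << 24;	/* 64-bit shift: bits kept */

		printf("bad=%#llx good=%#llx\n",
		       (unsigned long long)bad, (unsigned long long)good);
		return 0;
	}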
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index aded2a91162a..ad4439145f85 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -70,11 +70,15 @@ EXPORT_SYMBOL(efi);
70struct efi_memory_map memmap; 70struct efi_memory_map memmap;
71 71
72bool efi_64bit; 72bool efi_64bit;
73static bool efi_native;
74 73
75static struct efi efi_phys __initdata; 74static struct efi efi_phys __initdata;
76static efi_system_table_t efi_systab __initdata; 75static efi_system_table_t efi_systab __initdata;
77 76
77static inline bool efi_is_native(void)
78{
79 return IS_ENABLED(CONFIG_X86_64) == efi_64bit;
80}
81
78static int __init setup_noefi(char *arg) 82static int __init setup_noefi(char *arg)
79{ 83{
80 efi_enabled = 0; 84 efi_enabled = 0;
@@ -420,7 +424,7 @@ void __init efi_reserve_boot_services(void)
420 } 424 }
421} 425}
422 426
423static void __init efi_unmap_memmap(void) 427void __init efi_unmap_memmap(void)
424{ 428{
425 if (memmap.map) { 429 if (memmap.map) {
426 early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size); 430 early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size);
@@ -432,7 +436,7 @@ void __init efi_free_boot_services(void)
432{ 436{
433 void *p; 437 void *p;
434 438
435 if (!efi_native) 439 if (!efi_is_native())
436 return; 440 return;
437 441
438 for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { 442 for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
@@ -684,12 +688,10 @@ void __init efi_init(void)
684 return; 688 return;
685 } 689 }
686 efi_phys.systab = (efi_system_table_t *)boot_params.efi_info.efi_systab; 690 efi_phys.systab = (efi_system_table_t *)boot_params.efi_info.efi_systab;
687 efi_native = !efi_64bit;
688#else 691#else
689 efi_phys.systab = (efi_system_table_t *) 692 efi_phys.systab = (efi_system_table_t *)
690 (boot_params.efi_info.efi_systab | 693 (boot_params.efi_info.efi_systab |
691 ((__u64)boot_params.efi_info.efi_systab_hi<<32)); 694 ((__u64)boot_params.efi_info.efi_systab_hi<<32));
692 efi_native = efi_64bit;
693#endif 695#endif
694 696
695 if (efi_systab_init(efi_phys.systab)) { 697 if (efi_systab_init(efi_phys.systab)) {
@@ -723,7 +725,7 @@ void __init efi_init(void)
723 * that doesn't match the kernel 32/64-bit mode. 725 * that doesn't match the kernel 32/64-bit mode.
724 */ 726 */
725 727
726 if (!efi_native) 728 if (!efi_is_native())
727 pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n"); 729 pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
728 else if (efi_runtime_init()) { 730 else if (efi_runtime_init()) {
729 efi_enabled = 0; 731 efi_enabled = 0;
@@ -735,7 +737,7 @@ void __init efi_init(void)
735 return; 737 return;
736 } 738 }
737#ifdef CONFIG_X86_32 739#ifdef CONFIG_X86_32
738 if (efi_native) { 740 if (efi_is_native()) {
739 x86_platform.get_wallclock = efi_get_time; 741 x86_platform.get_wallclock = efi_get_time;
740 x86_platform.set_wallclock = efi_set_rtc_mmss; 742 x86_platform.set_wallclock = efi_set_rtc_mmss;
741 } 743 }
@@ -810,6 +812,16 @@ void __iomem *efi_lookup_mapped_addr(u64 phys_addr)
810 return NULL; 812 return NULL;
811} 813}
812 814
815void efi_memory_uc(u64 addr, unsigned long size)
816{
817 unsigned long page_shift = 1UL << EFI_PAGE_SHIFT;
818 u64 npages;
819
820 npages = round_up(size, page_shift) / page_shift;
821 memrange_efi_to_native(&addr, &npages);
822 set_memory_uc(addr, npages);
823}
824
813/* 825/*
814 * This function will switch the EFI runtime services to virtual mode. 826 * This function will switch the EFI runtime services to virtual mode.
815 * Essentially, look through the EFI memmap and map every region that 827 * Essentially, look through the EFI memmap and map every region that
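efi_memory_uc() added above rounds the region size up to whole 4 KiB EFI pages before marking the mapping uncached. The same arithmetic as a stand-alone example with a made-up size:

	#include <stdio.h>
	#include <stdint.h>

	#define EFI_PAGE_SHIFT 12

	int main(void)
	{
		uint64_t size = 0x2345;			/* assumed region size in bytes */
		uint64_t page = 1ULL << EFI_PAGE_SHIFT;	/* 4 KiB EFI page */
		uint64_t npages = (size + page - 1) / page;	/* round up to whole pages */

		printf("%llu bytes -> %llu EFI pages\n",
		       (unsigned long long)size, (unsigned long long)npages);
		return 0;
	}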
@@ -823,7 +835,7 @@ void __init efi_enter_virtual_mode(void)
823 efi_memory_desc_t *md, *prev_md = NULL; 835 efi_memory_desc_t *md, *prev_md = NULL;
824 efi_status_t status; 836 efi_status_t status;
825 unsigned long size; 837 unsigned long size;
826 u64 end, systab, addr, npages, end_pfn; 838 u64 end, systab, end_pfn;
827 void *p, *va, *new_memmap = NULL; 839 void *p, *va, *new_memmap = NULL;
828 int count = 0; 840 int count = 0;
829 841
@@ -834,7 +846,7 @@ void __init efi_enter_virtual_mode(void)
834 * non-native EFI 846 * non-native EFI
835 */ 847 */
836 848
837 if (!efi_native) { 849 if (!efi_is_native()) {
838 efi_unmap_memmap(); 850 efi_unmap_memmap();
839 return; 851 return;
840 } 852 }
@@ -879,10 +891,14 @@ void __init efi_enter_virtual_mode(void)
879 end_pfn = PFN_UP(end); 891 end_pfn = PFN_UP(end);
880 if (end_pfn <= max_low_pfn_mapped 892 if (end_pfn <= max_low_pfn_mapped
881 || (end_pfn > (1UL << (32 - PAGE_SHIFT)) 893 || (end_pfn > (1UL << (32 - PAGE_SHIFT))
882 && end_pfn <= max_pfn_mapped)) 894 && end_pfn <= max_pfn_mapped)) {
883 va = __va(md->phys_addr); 895 va = __va(md->phys_addr);
884 else 896
885 va = efi_ioremap(md->phys_addr, size, md->type); 897 if (!(md->attribute & EFI_MEMORY_WB))
898 efi_memory_uc((u64)(unsigned long)va, size);
899 } else
900 va = efi_ioremap(md->phys_addr, size,
901 md->type, md->attribute);
886 902
887 md->virt_addr = (u64) (unsigned long) va; 903 md->virt_addr = (u64) (unsigned long) va;
888 904
@@ -892,13 +908,6 @@ void __init efi_enter_virtual_mode(void)
892 continue; 908 continue;
893 } 909 }
894 910
895 if (!(md->attribute & EFI_MEMORY_WB)) {
896 addr = md->virt_addr;
897 npages = md->num_pages;
898 memrange_efi_to_native(&addr, &npages);
899 set_memory_uc(addr, npages);
900 }
901
902 systab = (u64) (unsigned long) efi_phys.systab; 911 systab = (u64) (unsigned long) efi_phys.systab;
903 if (md->phys_addr <= systab && systab < end) { 912 if (md->phys_addr <= systab && systab < end) {
904 systab += md->virt_addr - md->phys_addr; 913 systab += md->virt_addr - md->phys_addr;
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index ac3aa54e2654..95fd505dfeb6 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -82,7 +82,7 @@ void __init efi_call_phys_epilog(void)
82} 82}
83 83
84void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size, 84void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
85 u32 type) 85 u32 type, u64 attribute)
86{ 86{
87 unsigned long last_map_pfn; 87 unsigned long last_map_pfn;
88 88
@@ -92,8 +92,11 @@ void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
92 last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size); 92 last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
93 if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) { 93 if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
94 unsigned long top = last_map_pfn << PAGE_SHIFT; 94 unsigned long top = last_map_pfn << PAGE_SHIFT;
95 efi_ioremap(top, size - (top - phys_addr), type); 95 efi_ioremap(top, size - (top - phys_addr), type, attribute);
96 } 96 }
97 97
98 if (!(attribute & EFI_MEMORY_WB))
99 efi_memory_uc((u64)(unsigned long)__va(phys_addr), size);
100
98 return (void __iomem *)__va(phys_addr); 101 return (void __iomem *)__va(phys_addr);
99} 102}
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index e3497f240eab..586d83812b67 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -81,8 +81,6 @@
81#include "smp.h" 81#include "smp.h"
82#include "multicalls.h" 82#include "multicalls.h"
83 83
84#include <xen/events.h>
85
86EXPORT_SYMBOL_GPL(hypercall_page); 84EXPORT_SYMBOL_GPL(hypercall_page);
87 85
88DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu); 86DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 08b4c5209384..b34b5cda5ae1 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -236,7 +236,7 @@ config CMA_SIZE_PERCENTAGE
236 236
237choice 237choice
238 prompt "Selected region size" 238 prompt "Selected region size"
239 default CMA_SIZE_SEL_ABSOLUTE 239 default CMA_SIZE_SEL_MBYTES
240 240
241config CMA_SIZE_SEL_MBYTES 241config CMA_SIZE_SEL_MBYTES
242 bool "Use mega bytes value only" 242 bool "Use mega bytes value only"
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index 560a7173f810..bc256b641027 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -191,9 +191,8 @@ EXPORT_SYMBOL(dma_release_from_coherent);
191 * This checks whether the memory was allocated from the per-device 191 * This checks whether the memory was allocated from the per-device
192 * coherent memory pool and if so, maps that memory to the provided vma. 192 * coherent memory pool and if so, maps that memory to the provided vma.
193 * 193 *
194 * Returns 1 if we correctly mapped the memory, or 0 if 194 * Returns 1 if we correctly mapped the memory, or 0 if the caller should
195 * dma_release_coherent() should proceed with mapping memory from 195 * proceed with mapping memory from generic pools.
196 * generic pools.
197 */ 196 */
198int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma, 197int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
199 void *vaddr, size_t size, int *ret) 198 void *vaddr, size_t size, int *ret)
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 9a1469474f55..612afcc5a938 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -27,15 +27,12 @@
27#include <linux/mm.h> 27#include <linux/mm.h>
28#include <linux/mutex.h> 28#include <linux/mutex.h>
29#include <linux/page-isolation.h> 29#include <linux/page-isolation.h>
30#include <linux/sizes.h>
30#include <linux/slab.h> 31#include <linux/slab.h>
31#include <linux/swap.h> 32#include <linux/swap.h>
32#include <linux/mm_types.h> 33#include <linux/mm_types.h>
33#include <linux/dma-contiguous.h> 34#include <linux/dma-contiguous.h>
34 35
35#ifndef SZ_1M
36#define SZ_1M (1 << 20)
37#endif
38
39struct cma { 36struct cma {
40 unsigned long base_pfn; 37 unsigned long base_pfn;
41 unsigned long count; 38 unsigned long count;
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index 6be390bd8bd1..f0d30543fcce 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -3,7 +3,7 @@
3# subsystems should select the appropriate symbols. 3# subsystems should select the appropriate symbols.
4 4
5config REGMAP 5config REGMAP
6 default y if (REGMAP_I2C || REGMAP_SPI) 6 default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_MMIO || REGMAP_IRQ)
7 select LZO_COMPRESS 7 select LZO_COMPRESS
8 select LZO_DECOMPRESS 8 select LZO_DECOMPRESS
9 select IRQ_DOMAIN if REGMAP_IRQ 9 select IRQ_DOMAIN if REGMAP_IRQ
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 677cd6e4e1a1..d4c12180c654 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -90,6 +90,17 @@ config DW_DMAC
90 Support the Synopsys DesignWare AHB DMA controller. This 90 Support the Synopsys DesignWare AHB DMA controller. This
91 can be integrated in chips such as the Atmel AT32ap7000. 91 can be integrated in chips such as the Atmel AT32ap7000.
92 92
93config DW_DMAC_BIG_ENDIAN_IO
94 bool "Use big endian I/O register access"
95 default y if AVR32
96 depends on DW_DMAC
97 help
98 Say yes here to use big endian I/O access when reading and writing
99 to the DMA controller registers. This is needed on some platforms,
100 like the Atmel AVR32 architecture.
101
102 If unsure, use the default setting.
103
93config AT_HDMAC 104config AT_HDMAC
94 tristate "Atmel AHB DMA support" 105 tristate "Atmel AHB DMA support"
95 depends on ARCH_AT91 106 depends on ARCH_AT91
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index ff39fa6cd2bc..88965597b7d0 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -98,9 +98,17 @@ struct dw_dma_regs {
98 u32 DW_PARAMS; 98 u32 DW_PARAMS;
99}; 99};
100 100
101#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO
102#define dma_readl_native ioread32be
103#define dma_writel_native iowrite32be
104#else
105#define dma_readl_native readl
106#define dma_writel_native writel
107#endif
108
101/* To access the registers in early stage of probe */ 109/* To access the registers in early stage of probe */
102#define dma_read_byaddr(addr, name) \ 110#define dma_read_byaddr(addr, name) \
103 readl((addr) + offsetof(struct dw_dma_regs, name)) 111 dma_readl_native((addr) + offsetof(struct dw_dma_regs, name))
104 112
105/* Bitfields in DW_PARAMS */ 113/* Bitfields in DW_PARAMS */
106#define DW_PARAMS_NR_CHAN 8 /* number of channels */ 114#define DW_PARAMS_NR_CHAN 8 /* number of channels */
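The *_native accessors above let every register macro compile for either bus endianness from one Kconfig symbol. A stand-alone sketch of the same compile-time selection pattern (a user-space model, not the kernel macros):

	#include <stdio.h>
	#include <stdint.h>

	#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO
	#define reg_read_native(p)  __builtin_bswap32(*(volatile uint32_t *)(p))
	#else
	#define reg_read_native(p)  (*(volatile uint32_t *)(p))
	#endif

	int main(void)
	{
		uint32_t fake_reg = 0x12345678;	/* stand-in for an MMIO word */

		printf("read %#x\n", reg_read_native(&fake_reg));
		return 0;
	}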
@@ -216,9 +224,9 @@ __dwc_regs(struct dw_dma_chan *dwc)
216} 224}
217 225
218#define channel_readl(dwc, name) \ 226#define channel_readl(dwc, name) \
219 readl(&(__dwc_regs(dwc)->name)) 227 dma_readl_native(&(__dwc_regs(dwc)->name))
220#define channel_writel(dwc, name, val) \ 228#define channel_writel(dwc, name, val) \
221 writel((val), &(__dwc_regs(dwc)->name)) 229 dma_writel_native((val), &(__dwc_regs(dwc)->name))
222 230
223static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan) 231static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
224{ 232{
@@ -246,9 +254,9 @@ static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
246} 254}
247 255
248#define dma_readl(dw, name) \ 256#define dma_readl(dw, name) \
249 readl(&(__dw_regs(dw)->name)) 257 dma_readl_native(&(__dw_regs(dw)->name))
250#define dma_writel(dw, name, val) \ 258#define dma_writel(dw, name, val) \
251 writel((val), &(__dw_regs(dw)->name)) 259 dma_writel_native((val), &(__dw_regs(dw)->name))
252 260
253#define channel_set_bit(dw, reg, mask) \ 261#define channel_set_bit(dw, reg, mask) \
254 dma_writel(dw, reg, ((mask) << 8) | (mask)) 262 dma_writel(dw, reg, ((mask) << 8) | (mask))
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 5a297a26211d..cc8e7c78a23c 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -170,8 +170,11 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
170 * memory controller and apply to register. Search for the first 170 * memory controller and apply to register. Search for the first
171 * bandwidth entry that is greater than or equal to the setting requested 171 * bandwidth entry that is greater than or equal to the setting requested
172 * and program that. If at last entry, turn off DRAM scrubbing. 172 * and program that. If at last entry, turn off DRAM scrubbing.
173 *
174 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
175 * by falling back to the last element in scrubrates[].
173 */ 176 */
174 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) { 177 for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
175 /* 178 /*
176 * skip scrub rates which aren't recommended 179 * skip scrub rates which aren't recommended
177 * (see F10 BKDG, F3x58) 180 * (see F10 BKDG, F3x58)
@@ -181,12 +184,6 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
181 184
182 if (scrubrates[i].bandwidth <= new_bw) 185 if (scrubrates[i].bandwidth <= new_bw)
183 break; 186 break;
184
185 /*
186 * if no suitable bandwidth found, turn off DRAM scrubbing
187 * entirely by falling back to the last element in the
188 * scrubrates array.
189 */
190 } 187 }
191 188
192 scrubval = scrubrates[i].scrubval; 189 scrubval = scrubrates[i].scrubval;
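The loop above scans the descending-bandwidth table and stops at the first entry no faster than the requested rate, falling through to the final "off" entry otherwise. A toy stand-alone version with an invented table (not the real scrubrates[]):

	#include <stdio.h>

	struct rate { unsigned scrubval; unsigned bandwidth; };

	int main(void)
	{
		static const struct rate rates[] = {
			{ 0x01, 1600 }, { 0x02, 800 }, { 0x03, 400 }, { 0x00, 0 },
		};
		unsigned new_bw = 500, i;

		/* stop before the last element; reaching it means scrubbing off */
		for (i = 0; i < sizeof(rates) / sizeof(rates[0]) - 1; i++)
			if (rates[i].bandwidth <= new_bw)
				break;

		printf("picked scrubval %#x (bandwidth %u)\n",
		       rates[i].scrubval, rates[i].bandwidth);
		return 0;
	}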
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 09e11a5d921a..fd9d0af4d536 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -206,7 +206,7 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
206 size_t size; 206 size_t size;
207 int ret; 207 int ret;
208 208
209 DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d\n", 209 DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
210 sizes->surface_width, sizes->surface_height, 210 sizes->surface_width, sizes->surface_height,
211 sizes->surface_bpp); 211 sizes->surface_bpp);
212 212
@@ -220,7 +220,7 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
220 220
221 size = mode_cmd.pitches[0] * mode_cmd.height; 221 size = mode_cmd.pitches[0] * mode_cmd.height;
222 obj = drm_gem_cma_create(dev, size); 222 obj = drm_gem_cma_create(dev, size);
223 if (!obj) 223 if (IS_ERR(obj))
224 return -ENOMEM; 224 return -ENOMEM;
225 225
226 fbi = framebuffer_alloc(0, dev->dev); 226 fbi = framebuffer_alloc(0, dev->dev);
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 441ebc1bdbef..d4b20ceda3fb 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -205,8 +205,6 @@ static int drm_gem_one_name_info(int id, void *ptr, void *data)
205 struct drm_gem_object *obj = ptr; 205 struct drm_gem_object *obj = ptr;
206 struct seq_file *m = data; 206 struct seq_file *m = data;
207 207
208 seq_printf(m, "name %d size %zd\n", obj->name, obj->size);
209
210 seq_printf(m, "%6d %8zd %7d %8d\n", 208 seq_printf(m, "%6d %8zd %7d %8d\n",
211 obj->name, obj->size, 209 obj->name, obj->size,
212 atomic_read(&obj->handle_count), 210 atomic_read(&obj->handle_count),
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index aaeb6f8d69ce..b8a282ea8751 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -64,7 +64,6 @@ int drm_get_platform_dev(struct platform_device *platdev,
64 } 64 }
65 65
66 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 66 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
67 dev_set_drvdata(&platdev->dev, dev);
68 ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL); 67 ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
69 if (ret) 68 if (ret)
70 goto err_g1; 69 goto err_g1;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index aac4e5e1a5b9..6770ee6084b4 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -118,6 +118,13 @@ module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600);
118MODULE_PARM_DESC(i915_enable_ppgtt, 118MODULE_PARM_DESC(i915_enable_ppgtt,
119 "Enable PPGTT (default: true)"); 119 "Enable PPGTT (default: true)");
120 120
121unsigned int i915_preliminary_hw_support __read_mostly = 0;
122module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
123MODULE_PARM_DESC(preliminary_hw_support,
124 "Enable preliminary hardware support. "
125 "Enable Haswell and ValleyView Support. "
126 "(default: false)");
127
121static struct drm_driver driver; 128static struct drm_driver driver;
122extern int intel_agp_enabled; 129extern int intel_agp_enabled;
123 130
@@ -826,6 +833,12 @@ i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
826 struct intel_device_info *intel_info = 833 struct intel_device_info *intel_info =
827 (struct intel_device_info *) ent->driver_data; 834 (struct intel_device_info *) ent->driver_data;
828 835
836 if (intel_info->is_haswell || intel_info->is_valleyview)
837 if (!i915_preliminary_hw_support) {
838 DRM_ERROR("Preliminary hardware support disabled\n");
839 return -ENODEV;
840 }
841
829 /* Only bind to function 0 of the device. Early generations 842 /* Only bind to function 0 of the device. Early generations
830 * used function 1 as a placeholder for multi-head. This causes 843 * used function 1 as a placeholder for multi-head. This causes
831 * us confusion instead, especially on the systems where both 844 * us confusion instead, especially on the systems where both
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b84f7861e438..f511fa2f4168 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1217,6 +1217,7 @@ extern int i915_enable_rc6 __read_mostly;
1217extern int i915_enable_fbc __read_mostly; 1217extern int i915_enable_fbc __read_mostly;
1218extern bool i915_enable_hangcheck __read_mostly; 1218extern bool i915_enable_hangcheck __read_mostly;
1219extern int i915_enable_ppgtt __read_mostly; 1219extern int i915_enable_ppgtt __read_mostly;
1220extern unsigned int i915_preliminary_hw_support __read_mostly;
1220 1221
1221extern int i915_suspend(struct drm_device *dev, pm_message_t state); 1222extern int i915_suspend(struct drm_device *dev, pm_message_t state);
1222extern int i915_resume(struct drm_device *dev); 1223extern int i915_resume(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d33d02d13c96..107f09befe92 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1407,8 +1407,10 @@ out:
1407 return VM_FAULT_NOPAGE; 1407 return VM_FAULT_NOPAGE;
1408 case -ENOMEM: 1408 case -ENOMEM:
1409 return VM_FAULT_OOM; 1409 return VM_FAULT_OOM;
1410 case -ENOSPC:
1411 return VM_FAULT_SIGBUS;
1410 default: 1412 default:
1411 WARN_ON_ONCE(ret); 1413 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1412 return VM_FAULT_SIGBUS; 1414 return VM_FAULT_SIGBUS;
1413 } 1415 }
1414} 1416}
@@ -1822,10 +1824,11 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
1822 sg_set_page(sg, page, PAGE_SIZE, 0); 1824 sg_set_page(sg, page, PAGE_SIZE, 0);
1823 } 1825 }
1824 1826
1827 obj->pages = st;
1828
1825 if (i915_gem_object_needs_bit17_swizzle(obj)) 1829 if (i915_gem_object_needs_bit17_swizzle(obj))
1826 i915_gem_object_do_bit_17_swizzle(obj); 1830 i915_gem_object_do_bit_17_swizzle(obj);
1827 1831
1828 obj->pages = st;
1829 return 0; 1832 return 0;
1830 1833
1831err_pages: 1834err_pages:
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 893f30164b7e..f78061af7045 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -219,20 +219,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
219 intel_encoder_to_crt(to_intel_encoder(encoder)); 219 intel_encoder_to_crt(to_intel_encoder(encoder));
220 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 220 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
221 struct drm_i915_private *dev_priv = dev->dev_private; 221 struct drm_i915_private *dev_priv = dev->dev_private;
222 int dpll_md_reg; 222 u32 adpa;
223 u32 adpa, dpll_md;
224
225 dpll_md_reg = DPLL_MD(intel_crtc->pipe);
226
227 /*
228 * Disable separate mode multiplier used when cloning SDVO to CRT
229 * XXX this needs to be adjusted when we really are cloning
230 */
231 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
232 dpll_md = I915_READ(dpll_md_reg);
233 I915_WRITE(dpll_md_reg,
234 dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
235 }
236 223
237 adpa = ADPA_HOTPLUG_BITS; 224 adpa = ADPA_HOTPLUG_BITS;
238 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 225 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 682bd3729baf..461a637f1ef7 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -7892,6 +7892,34 @@ struct intel_quirk {
7892 void (*hook)(struct drm_device *dev); 7892 void (*hook)(struct drm_device *dev);
7893}; 7893};
7894 7894
7895/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
7896struct intel_dmi_quirk {
7897 void (*hook)(struct drm_device *dev);
7898 const struct dmi_system_id (*dmi_id_list)[];
7899};
7900
7901static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
7902{
7903 DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
7904 return 1;
7905}
7906
7907static const struct intel_dmi_quirk intel_dmi_quirks[] = {
7908 {
7909 .dmi_id_list = &(const struct dmi_system_id[]) {
7910 {
7911 .callback = intel_dmi_reverse_brightness,
7912 .ident = "NCR Corporation",
7913 .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
7914 DMI_MATCH(DMI_PRODUCT_NAME, ""),
7915 },
7916 },
7917 { } /* terminating entry */
7918 },
7919 .hook = quirk_invert_brightness,
7920 },
7921};
7922
7895static struct intel_quirk intel_quirks[] = { 7923static struct intel_quirk intel_quirks[] = {
7896 /* HP Mini needs pipe A force quirk (LP: #322104) */ 7924 /* HP Mini needs pipe A force quirk (LP: #322104) */
7897 { 0x27ae, 0x103c, 0x361a, quirk_pipea_force }, 7925 { 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
@@ -7931,6 +7959,10 @@ static void intel_init_quirks(struct drm_device *dev)
7931 q->subsystem_device == PCI_ANY_ID)) 7959 q->subsystem_device == PCI_ANY_ID))
7932 q->hook(dev); 7960 q->hook(dev);
7933 } 7961 }
7962 for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
7963 if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
7964 intel_dmi_quirks[i].hook(dev);
7965 }
7934} 7966}
7935 7967
7936/* Disable the VGA plane that we never use */ 7968/* Disable the VGA plane that we never use */
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1b727a5c9ee5..368ed8ef1600 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1797,7 +1797,8 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1797 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) 1797 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
1798 break; 1798 break;
1799 if (i == intel_dp->lane_count && voltage_tries == 5) { 1799 if (i == intel_dp->lane_count && voltage_tries == 5) {
1800 if (++loop_tries == 5) { 1800 ++loop_tries;
1801 if (loop_tries == 5) {
1801 DRM_DEBUG_KMS("too many full retries, give up\n"); 1802 DRM_DEBUG_KMS("too many full retries, give up\n");
1802 break; 1803 break;
1803 } 1804 }
@@ -1807,11 +1808,15 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1807 } 1808 }
1808 1809
1809 /* Check to see if we've tried the same voltage 5 times */ 1810 /* Check to see if we've tried the same voltage 5 times */
1810 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) != voltage) { 1811 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
1811 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
1812 voltage_tries = 0;
1813 } else
1814 ++voltage_tries; 1812 ++voltage_tries;
1813 if (voltage_tries == 5) {
1814 DRM_DEBUG_KMS("too many voltage retries, give up\n");
1815 break;
1816 }
1817 } else
1818 voltage_tries = 0;
1819 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
1815 1820
1816 /* Compute new intel_dp->train_set as requested by target */ 1821 /* Compute new intel_dp->train_set as requested by target */
1817 intel_get_adjust_train(intel_dp, link_status); 1822 intel_get_adjust_train(intel_dp, link_status);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index e3166df55daa..edba93b3474b 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -777,6 +777,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
777 DMI_MATCH(DMI_BOARD_NAME, "D525TUD"), 777 DMI_MATCH(DMI_BOARD_NAME, "D525TUD"),
778 }, 778 },
779 }, 779 },
780 {
781 .callback = intel_no_lvds_dmi_callback,
782 .ident = "Supermicro X7SPA-H",
783 .matches = {
784 DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
785 DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"),
786 },
787 },
780 788
781 { } /* terminating entry */ 789 { } /* terminating entry */
782}; 790};
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 0007a4d9bf6e..c01d97db0061 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -139,6 +139,11 @@ struct intel_sdvo {
139 139
140 /* DDC bus used by this SDVO encoder */ 140 /* DDC bus used by this SDVO encoder */
141 uint8_t ddc_bus; 141 uint8_t ddc_bus;
142
143 /*
144 * the sdvo flag gets lost in round trip: dtd->adjusted_mode->dtd
145 */
146 uint8_t dtd_sdvo_flags;
142}; 147};
143 148
144struct intel_sdvo_connector { 149struct intel_sdvo_connector {
@@ -984,6 +989,7 @@ intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
984 return false; 989 return false;
985 990
986 intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); 991 intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
992 intel_sdvo->dtd_sdvo_flags = input_dtd.part2.sdvo_flags;
987 993
988 return true; 994 return true;
989} 995}
@@ -1092,6 +1098,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1092 * adjusted_mode. 1098 * adjusted_mode.
1093 */ 1099 */
1094 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); 1100 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
1101 if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
1102 input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
1095 if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd)) 1103 if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
1096 DRM_INFO("Setting input timings on %s failed\n", 1104 DRM_INFO("Setting input timings on %s failed\n",
1097 SDVO_NAME(intel_sdvo)); 1105 SDVO_NAME(intel_sdvo));
@@ -2277,10 +2285,8 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
2277 intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; 2285 intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
2278 } 2286 }
2279 2287
2280 /* SDVO LVDS is cloneable because the SDVO encoder does the upscaling, 2288 /* SDVO LVDS is not cloneable because the input mode gets adjusted by the encoder */
2281 * as opposed to native LVDS, where we upscale with the panel-fitter 2289 intel_sdvo->base.cloneable = false;
2282 * (and hence only the native LVDS resolution could be cloned). */
2283 intel_sdvo->base.cloneable = true;
2284 2290
2285 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); 2291 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
2286 if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) 2292 if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
diff --git a/drivers/gpu/drm/nouveau/core/core/gpuobj.c b/drivers/gpu/drm/nouveau/core/core/gpuobj.c
index 1f34549aff18..70586fde69cf 100644
--- a/drivers/gpu/drm/nouveau/core/core/gpuobj.c
+++ b/drivers/gpu/drm/nouveau/core/core/gpuobj.c
@@ -39,6 +39,11 @@ nouveau_gpuobj_destroy(struct nouveau_gpuobj *gpuobj)
39 nv_wo32(gpuobj, i, 0x00000000); 39 nv_wo32(gpuobj, i, 0x00000000);
40 } 40 }
41 41
42 if (gpuobj->node) {
43 nouveau_mm_free(&nv_gpuobj(gpuobj->parent)->heap,
44 &gpuobj->node);
45 }
46
42 if (gpuobj->heap.block_size) 47 if (gpuobj->heap.block_size)
43 nouveau_mm_fini(&gpuobj->heap); 48 nouveau_mm_fini(&gpuobj->heap);
44 49
diff --git a/drivers/gpu/drm/nouveau/core/core/mm.c b/drivers/gpu/drm/nouveau/core/core/mm.c
index bfddf87926dd..4d6206448670 100644
--- a/drivers/gpu/drm/nouveau/core/core/mm.c
+++ b/drivers/gpu/drm/nouveau/core/core/mm.c
@@ -236,7 +236,7 @@ nouveau_mm_fini(struct nouveau_mm *mm)
236 int nodes = 0; 236 int nodes = 0;
237 237
238 list_for_each_entry(node, &mm->nodes, nl_entry) { 238 list_for_each_entry(node, &mm->nodes, nl_entry) {
239 if (nodes++ == mm->heap_nodes) 239 if (WARN_ON(nodes++ == mm->heap_nodes))
240 return -EBUSY; 240 return -EBUSY;
241 } 241 }
242 242
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index dcb5c2befc92..70ca7d5a1aa1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -72,7 +72,7 @@ nouveau_bios_shadow_of(struct nouveau_bios *bios)
72 } 72 }
73 73
74 data = of_get_property(dn, "NVDA,BMP", &size); 74 data = of_get_property(dn, "NVDA,BMP", &size);
75 if (data) { 75 if (data && size) {
76 bios->size = size; 76 bios->size = size;
77 bios->data = kmalloc(bios->size, GFP_KERNEL); 77 bios->data = kmalloc(bios->size, GFP_KERNEL);
78 if (bios->data) 78 if (bios->data)
@@ -104,6 +104,9 @@ nouveau_bios_shadow_pramin(struct nouveau_bios *bios)
104 goto out; 104 goto out;
105 105
106 bios->size = nv_rd08(bios, 0x700002) * 512; 106 bios->size = nv_rd08(bios, 0x700002) * 512;
107 if (!bios->size)
108 goto out;
109
107 bios->data = kmalloc(bios->size, GFP_KERNEL); 110 bios->data = kmalloc(bios->size, GFP_KERNEL);
108 if (bios->data) { 111 if (bios->data) {
109 for (i = 0; i < bios->size; i++) 112 for (i = 0; i < bios->size; i++)
@@ -155,6 +158,9 @@ nouveau_bios_shadow_prom(struct nouveau_bios *bios)
155 158
156 /* read entire bios image to system memory */ 159 /* read entire bios image to system memory */
157 bios->size = nv_rd08(bios, 0x300002) * 512; 160 bios->size = nv_rd08(bios, 0x300002) * 512;
161 if (!bios->size)
162 goto out;
163
158 bios->data = kmalloc(bios->size, GFP_KERNEL); 164 bios->data = kmalloc(bios->size, GFP_KERNEL);
159 if (bios->data) { 165 if (bios->data) {
160 for (i = 0; i < bios->size; i++) 166 for (i = 0; i < bios->size; i++)
@@ -186,14 +192,22 @@ nouveau_bios_shadow_acpi(struct nouveau_bios *bios)
186{ 192{
187 struct pci_dev *pdev = nv_device(bios)->pdev; 193 struct pci_dev *pdev = nv_device(bios)->pdev;
188 int ret, cnt, i; 194 int ret, cnt, i;
189 u8 data[3];
190 195
191 if (!nouveau_acpi_rom_supported(pdev)) 196 if (!nouveau_acpi_rom_supported(pdev)) {
197 bios->data = NULL;
192 return; 198 return;
199 }
193 200
194 bios->size = 0; 201 bios->size = 0;
195 if (nouveau_acpi_get_bios_chunk(data, 0, 3) == 3) 202 bios->data = kmalloc(4096, GFP_KERNEL);
196 bios->size = data[2] * 512; 203 if (bios->data) {
204 if (nouveau_acpi_get_bios_chunk(bios->data, 0, 4096) == 4096)
205 bios->size = bios->data[2] * 512;
206 kfree(bios->data);
207 }
208
209 if (!bios->size)
210 return;
197 211
198 bios->data = kmalloc(bios->size, GFP_KERNEL); 212 bios->data = kmalloc(bios->size, GFP_KERNEL);
199 for (i = 0; bios->data && i < bios->size; i += cnt) { 213 for (i = 0; bios->data && i < bios->size; i += cnt) {
@@ -229,12 +243,14 @@ nouveau_bios_shadow_pci(struct nouveau_bios *bios)
229static int 243static int
230nouveau_bios_score(struct nouveau_bios *bios, const bool writeable) 244nouveau_bios_score(struct nouveau_bios *bios, const bool writeable)
231{ 245{
232 if (!bios->data || bios->data[0] != 0x55 || bios->data[1] != 0xAA) { 246 if (bios->size < 3 || !bios->data || bios->data[0] != 0x55 ||
247 bios->data[1] != 0xAA) {
233 nv_info(bios, "... signature not found\n"); 248 nv_info(bios, "... signature not found\n");
234 return 0; 249 return 0;
235 } 250 }
236 251
237 if (nvbios_checksum(bios->data, bios->data[2] * 512)) { 252 if (nvbios_checksum(bios->data,
253 min_t(u32, bios->data[2] * 512, bios->size))) {
238 nv_info(bios, "... checksum invalid\n"); 254 nv_info(bios, "... checksum invalid\n");
239 /* if a ro image is somewhat bad, it's probably all rubbish */ 255 /* if a ro image is somewhat bad, it's probably all rubbish */
240 return writeable ? 2 : 1; 256 return writeable ? 2 : 1;
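
The bios/base.c changes above harden the shadowing paths: the OF, PRAMIN and PROM paths now refuse zero-sized images before calling kmalloc(), the ACPI path reads the header through the chunked interface instead of a bare 3-byte probe, and nouveau_bios_score() bounds the checksum to the data actually read so a bogus length byte cannot walk past the buffer. A small standalone sketch of the bounded-checksum idea (plain C, not the nouveau code; the names are made up):

#include <stddef.h>
#include <stdint.h>

/* Sum bytes over the smaller of the claimed length and the bytes we really have. */
static uint8_t bounded_checksum(const uint8_t *data, size_t claimed, size_t have)
{
    size_t len = claimed < have ? claimed : have;
    uint8_t sum = 0;

    for (size_t i = 0; i < len; i++)
        sum += data[i];
    return sum;    /* a valid image sums to 0 */
}

int main(void)
{
    const uint8_t img[4] = { 0x55, 0xAA, 0x01, 0x00 };
    return bounded_checksum(img, 512, sizeof(img));    /* claims 512 bytes, only 4 present */
}
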
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c b/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c
index 5e5f4cddae3c..f835501203e5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c
@@ -157,11 +157,10 @@ pll_map_reg(struct nouveau_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len)
157 while (map->reg) { 157 while (map->reg) {
158 if (map->reg == reg && *ver >= 0x20) { 158 if (map->reg == reg && *ver >= 0x20) {
159 u16 addr = (data += hdr); 159 u16 addr = (data += hdr);
160 *type = map->type;
160 while (cnt--) { 161 while (cnt--) {
161 if (nv_ro32(bios, data) == map->reg) { 162 if (nv_ro32(bios, data) == map->reg)
162 *type = map->type;
163 return data; 163 return data;
164 }
165 data += *len; 164 data += *len;
166 } 165 }
167 return addr; 166 return addr;
@@ -200,11 +199,10 @@ pll_map_type(struct nouveau_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len)
200 while (map->reg) { 199 while (map->reg) {
201 if (map->type == type && *ver >= 0x20) { 200 if (map->type == type && *ver >= 0x20) {
202 u16 addr = (data += hdr); 201 u16 addr = (data += hdr);
202 *reg = map->reg;
203 while (cnt--) { 203 while (cnt--) {
204 if (nv_ro32(bios, data) == map->reg) { 204 if (nv_ro32(bios, data) == map->reg)
205 *reg = map->reg;
206 return data; 205 return data;
207 }
208 data += *len; 206 data += *len;
209 } 207 }
210 return addr; 208 return addr;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
index 42d7539e6525..27fb1af7a779 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
@@ -237,6 +237,7 @@ nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
237 return ret; 237 return ret;
238 238
239 priv->base.ram.stolen = (u64)nv_rd32(priv, 0x100e10) << 12; 239 priv->base.ram.stolen = (u64)nv_rd32(priv, 0x100e10) << 12;
240 priv->base.ram.type = NV_MEM_TYPE_STOLEN;
240 break; 241 break;
241 default: 242 default:
242 ret = nouveau_mm_init(&priv->base.vram, rsvd_head, size, 243 ret = nouveau_mm_init(&priv->base.vram, rsvd_head, size,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
index 0203e1e12caa..49050d991e75 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
@@ -92,7 +92,8 @@ nv41_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
92 struct nv04_vmmgr_priv *priv; 92 struct nv04_vmmgr_priv *priv;
93 int ret; 93 int ret;
94 94
95 if (!nouveau_boolopt(device->cfgopt, "NvPCIE", true)) { 95 if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) ||
96 !nouveau_boolopt(device->cfgopt, "NvPCIE", true)) {
96 return nouveau_object_ctor(parent, engine, &nv04_vmmgr_oclass, 97 return nouveau_object_ctor(parent, engine, &nv04_vmmgr_oclass,
97 data, size, pobject); 98 data, size, pobject);
98 } 99 }
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
index 0ac18d05a146..aa8131436e3d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
@@ -163,7 +163,8 @@ nv44_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
163 struct nv04_vmmgr_priv *priv; 163 struct nv04_vmmgr_priv *priv;
164 int ret; 164 int ret;
165 165
166 if (!nouveau_boolopt(device->cfgopt, "NvPCIE", true)) { 166 if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) ||
167 !nouveau_boolopt(device->cfgopt, "NvPCIE", true)) {
167 return nouveau_object_ctor(parent, engine, &nv04_vmmgr_oclass, 168 return nouveau_object_ctor(parent, engine, &nv04_vmmgr_oclass,
168 data, size, pobject); 169 data, size, pobject);
169 } 170 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 8f98e5a8c488..d2f8ffeed742 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -530,9 +530,11 @@ nouveau_page_flip_reserve(struct nouveau_bo *old_bo,
530 if (ret) 530 if (ret)
531 goto fail; 531 goto fail;
532 532
533 ret = ttm_bo_reserve(&old_bo->bo, false, false, false, 0); 533 if (likely(old_bo != new_bo)) {
534 if (ret) 534 ret = ttm_bo_reserve(&old_bo->bo, false, false, false, 0);
535 goto fail_unreserve; 535 if (ret)
536 goto fail_unreserve;
537 }
536 538
537 return 0; 539 return 0;
538 540
@@ -551,8 +553,10 @@ nouveau_page_flip_unreserve(struct nouveau_bo *old_bo,
551 nouveau_bo_fence(new_bo, fence); 553 nouveau_bo_fence(new_bo, fence);
552 ttm_bo_unreserve(&new_bo->bo); 554 ttm_bo_unreserve(&new_bo->bo);
553 555
554 nouveau_bo_fence(old_bo, fence); 556 if (likely(old_bo != new_bo)) {
555 ttm_bo_unreserve(&old_bo->bo); 557 nouveau_bo_fence(old_bo, fence);
558 ttm_bo_unreserve(&old_bo->bo);
559 }
556 560
557 nouveau_bo_unpin(old_bo); 561 nouveau_bo_unpin(old_bo);
558} 562}
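
The nouveau_display.c hunks above make page flips to the same buffer object safe: old_bo is only reserved, fenced and unreserved when it is actually a different object from new_bo, since reserving the same TTM buffer twice is not allowed. A standalone sketch of the guard (plain C; reserve()/unreserve() are stand-ins for ttm_bo_reserve()/ttm_bo_unreserve()):

struct buf { int id; };

static int  reserve(struct buf *b)   { (void)b; return 0; }   /* stand-in for ttm_bo_reserve()   */
static void unreserve(struct buf *b) { (void)b; }             /* stand-in for ttm_bo_unreserve() */

/* Reserve both buffers for a flip, but never reserve the same buffer twice. */
static int flip_reserve(struct buf *old_bo, struct buf *new_bo)
{
    int ret = reserve(new_bo);
    if (ret)
        return ret;

    if (old_bo != new_bo) {
        ret = reserve(old_bo);
        if (ret) {
            unreserve(new_bo);
            return ret;
        }
    }
    return 0;
}

int main(void)
{
    struct buf a = { 1 };
    return flip_reserve(&a, &a);    /* same BO on both sides: only one reservation */
}
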
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 49cbb3795a10..ba498f8e47a2 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -184,6 +184,7 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
184 struct radeon_backlight_privdata *pdata; 184 struct radeon_backlight_privdata *pdata;
185 struct radeon_encoder_atom_dig *dig; 185 struct radeon_encoder_atom_dig *dig;
186 u8 backlight_level; 186 u8 backlight_level;
187 char bl_name[16];
187 188
188 if (!radeon_encoder->enc_priv) 189 if (!radeon_encoder->enc_priv)
189 return; 190 return;
@@ -203,7 +204,9 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
203 memset(&props, 0, sizeof(props)); 204 memset(&props, 0, sizeof(props));
204 props.max_brightness = RADEON_MAX_BL_LEVEL; 205 props.max_brightness = RADEON_MAX_BL_LEVEL;
205 props.type = BACKLIGHT_RAW; 206 props.type = BACKLIGHT_RAW;
206 bd = backlight_device_register("radeon_bl", &drm_connector->kdev, 207 snprintf(bl_name, sizeof(bl_name),
208 "radeon_bl%d", dev->primary->index);
209 bd = backlight_device_register(bl_name, &drm_connector->kdev,
207 pdata, &radeon_atom_backlight_ops, &props); 210 pdata, &radeon_atom_backlight_ops, &props);
208 if (IS_ERR(bd)) { 211 if (IS_ERR(bd)) {
209 DRM_ERROR("Backlight registration failed\n"); 212 DRM_ERROR("Backlight registration failed\n");
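
The atombios_encoders.c hunk above (and the matching radeon_legacy_encoders.c hunk further down) registers the backlight under a per-card name built with snprintf() instead of the fixed string "radeon_bl", so two radeon GPUs in one machine no longer collide on the same backlight name. A standalone sketch of the naming pattern (plain C; in the driver the index would come from something like dev->primary->index):

#include <stdio.h>

int main(void)
{
    char bl_name[16];
    int card_index = 0;    /* per-GPU index; placeholder source */

    snprintf(bl_name, sizeof(bl_name), "radeon_bl%d", card_index);
    printf("%s\n", bl_name);    /* "radeon_bl0", "radeon_bl1", ... */
    return 0;
}
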
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 573ed1bc6cf7..30271b641913 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -2829,6 +2829,7 @@ static bool evergreen_vm_reg_valid(u32 reg)
2829 case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS: 2829 case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS:
2830 return true; 2830 return true;
2831 default: 2831 default:
2832 DRM_ERROR("Invalid register 0x%x in CS\n", reg);
2832 return false; 2833 return false;
2833 } 2834 }
2834} 2835}
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 8c74c729586d..81e6a568c29d 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1538,26 +1538,31 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
1538{ 1538{
1539 struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index]; 1539 struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
1540 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags); 1540 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
1541 int i;
1542 1541
1543 radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, 1 + count * 2)); 1542 while (count) {
1544 radeon_ring_write(ring, pe); 1543 unsigned ndw = 1 + count * 2;
1545 radeon_ring_write(ring, upper_32_bits(pe) & 0xff); 1544 if (ndw > 0x3FFF)
1546 for (i = 0; i < count; ++i) { 1545 ndw = 0x3FFF;
1547 uint64_t value = 0; 1546
1548 if (flags & RADEON_VM_PAGE_SYSTEM) { 1547 radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
1549 value = radeon_vm_map_gart(rdev, addr); 1548 radeon_ring_write(ring, pe);
1550 value &= 0xFFFFFFFFFFFFF000ULL; 1549 radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
1551 addr += incr; 1550 for (; ndw > 1; ndw -= 2, --count, pe += 8) {
1552 1551 uint64_t value = 0;
1553 } else if (flags & RADEON_VM_PAGE_VALID) { 1552 if (flags & RADEON_VM_PAGE_SYSTEM) {
1554 value = addr; 1553 value = radeon_vm_map_gart(rdev, addr);
1555 addr += incr; 1554 value &= 0xFFFFFFFFFFFFF000ULL;
1556 } 1555 addr += incr;
1556
1557 } else if (flags & RADEON_VM_PAGE_VALID) {
1558 value = addr;
1559 addr += incr;
1560 }
1557 1561
1558 value |= r600_flags; 1562 value |= r600_flags;
1559 radeon_ring_write(ring, value); 1563 radeon_ring_write(ring, value);
1560 radeon_ring_write(ring, upper_32_bits(value)); 1564 radeon_ring_write(ring, upper_32_bits(value));
1565 }
1561 } 1566 }
1562} 1567}
1563 1568
@@ -1586,4 +1591,8 @@ void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
1586 /* bits 0-7 are the VM contexts0-7 */ 1591 /* bits 0-7 are the VM contexts0-7 */
1587 radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0)); 1592 radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
1588 radeon_ring_write(ring, 1 << vm->id); 1593 radeon_ring_write(ring, 1 << vm->id);
1594
1595 /* sync PFP to ME, otherwise we might get invalid PFP reads */
1596 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
1597 radeon_ring_write(ring, 0x0);
1589} 1598}
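
The cayman_vm_set_page() rewrite above splits a large page-table update into several PACKET3_ME_WRITE packets, because a single packet cannot carry more than 0x3FFF dwords; each chunk re-emits the header and the destination address, and the VM flush now ends with a PFP_SYNC_ME so the prefetcher cannot read stale entries. A standalone sketch of the chunking arithmetic (plain C; emit_chunk() is a stand-in for the ring writes, and the 0x3FFF limit is taken from the hunk):

#include <stdio.h>

#define MAX_NDW 0x3FFF    /* per-packet dword limit used above */

static void emit_chunk(unsigned long long pe, unsigned entries)
{
    printf("packet: pe=0x%llx, %u entries\n", pe, entries);
}

/* One dword addresses the destination, each page-table entry costs two more. */
static void set_pages(unsigned long long pe, unsigned count)
{
    while (count) {
        unsigned ndw = 1 + count * 2;
        unsigned entries;

        if (ndw > MAX_NDW)
            ndw = MAX_NDW;
        entries = (ndw - 1) / 2;

        emit_chunk(pe, entries);
        pe += (unsigned long long)entries * 8;    /* 8 bytes per entry */
        count -= entries;
    }
}

int main(void)
{
    set_pages(0x100000ULL, 20000);    /* needs three packets with this limit */
    return 0;
}
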
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 2423d1b5d385..cbef6815907a 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -502,6 +502,7 @@
502#define PACKET3_MPEG_INDEX 0x3A 502#define PACKET3_MPEG_INDEX 0x3A
503#define PACKET3_WAIT_REG_MEM 0x3C 503#define PACKET3_WAIT_REG_MEM 0x3C
504#define PACKET3_MEM_WRITE 0x3D 504#define PACKET3_MEM_WRITE 0x3D
505#define PACKET3_PFP_SYNC_ME 0x42
505#define PACKET3_SURFACE_SYNC 0x43 506#define PACKET3_SURFACE_SYNC 0x43
506# define PACKET3_CB0_DEST_BASE_ENA (1 << 6) 507# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
507# define PACKET3_CB1_DEST_BASE_ENA (1 << 7) 508# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 1aa3f910b993..37f6a907aea4 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -87,7 +87,7 @@ static union acpi_object *radeon_atpx_call(acpi_handle handle, int function,
87 atpx_arg_elements[1].integer.value = 0; 87 atpx_arg_elements[1].integer.value = 0;
88 } 88 }
89 89
90 status = acpi_evaluate_object(handle, "ATPX", &atpx_arg, &buffer); 90 status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);
91 91
92 /* Fail only if calling the method fails and ATPX is supported */ 92 /* Fail only if calling the method fails and ATPX is supported */
93 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { 93 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
@@ -373,11 +373,11 @@ static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
373} 373}
374 374
375/** 375/**
376 * radeon_atpx_pci_probe_handle - look up the ATRM and ATPX handles 376 * radeon_atpx_pci_probe_handle - look up the ATPX handle
377 * 377 *
378 * @pdev: pci device 378 * @pdev: pci device
379 * 379 *
380 * Look up the ATPX and ATRM handles (all asics). 380 * Look up the ATPX handles (all asics).
381 * Returns true if the handles are found, false if not. 381 * Returns true if the handles are found, false if not.
382 */ 382 */
383static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev) 383static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index bd13ca09eb62..e2f5f888c374 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -355,6 +355,8 @@ int radeon_wb_init(struct radeon_device *rdev)
355 */ 355 */
356void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base) 356void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
357{ 357{
358 uint64_t limit = (uint64_t)radeon_vram_limit << 20;
359
358 mc->vram_start = base; 360 mc->vram_start = base;
359 if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) { 361 if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
360 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n"); 362 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
@@ -368,8 +370,8 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64
368 mc->mc_vram_size = mc->aper_size; 370 mc->mc_vram_size = mc->aper_size;
369 } 371 }
370 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; 372 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
371 if (radeon_vram_limit && radeon_vram_limit < mc->real_vram_size) 373 if (limit && limit < mc->real_vram_size)
372 mc->real_vram_size = radeon_vram_limit; 374 mc->real_vram_size = limit;
373 dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n", 375 dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
374 mc->mc_vram_size >> 20, mc->vram_start, 376 mc->mc_vram_size >> 20, mc->vram_start,
375 mc->vram_end, mc->real_vram_size >> 20); 377 mc->vram_end, mc->real_vram_size >> 20);
@@ -835,6 +837,19 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state)
835} 837}
836 838
837/** 839/**
840 * radeon_check_pot_argument - check that argument is a power of two
841 *
842 * @arg: value to check
843 *
844 * Validates that a certain argument is a power of two (all asics).
845 * Returns true if argument is valid.
846 */
847static bool radeon_check_pot_argument(int arg)
848{
849 return (arg & (arg - 1)) == 0;
850}
851
852/**
838 * radeon_check_arguments - validate module params 853 * radeon_check_arguments - validate module params
839 * 854 *
840 * @rdev: radeon_device pointer 855 * @rdev: radeon_device pointer
@@ -845,52 +860,25 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state)
845static void radeon_check_arguments(struct radeon_device *rdev) 860static void radeon_check_arguments(struct radeon_device *rdev)
846{ 861{
847 /* vramlimit must be a power of two */ 862 /* vramlimit must be a power of two */
848 switch (radeon_vram_limit) { 863 if (!radeon_check_pot_argument(radeon_vram_limit)) {
849 case 0:
850 case 4:
851 case 8:
852 case 16:
853 case 32:
854 case 64:
855 case 128:
856 case 256:
857 case 512:
858 case 1024:
859 case 2048:
860 case 4096:
861 break;
862 default:
863 dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n", 864 dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
864 radeon_vram_limit); 865 radeon_vram_limit);
865 radeon_vram_limit = 0; 866 radeon_vram_limit = 0;
866 break;
867 } 867 }
868 radeon_vram_limit = radeon_vram_limit << 20; 868
869 /* gtt size must be power of two and greater or equal to 32M */ 869 /* gtt size must be power of two and greater or equal to 32M */
870 switch (radeon_gart_size) { 870 if (radeon_gart_size < 32) {
871 case 4:
872 case 8:
873 case 16:
874 dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n", 871 dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
875 radeon_gart_size); 872 radeon_gart_size);
876 radeon_gart_size = 512; 873 radeon_gart_size = 512;
877 break; 874
878 case 32: 875 } else if (!radeon_check_pot_argument(radeon_gart_size)) {
879 case 64:
880 case 128:
881 case 256:
882 case 512:
883 case 1024:
884 case 2048:
885 case 4096:
886 break;
887 default:
888 dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n", 876 dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
889 radeon_gart_size); 877 radeon_gart_size);
890 radeon_gart_size = 512; 878 radeon_gart_size = 512;
891 break;
892 } 879 }
893 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; 880 rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
881
894 /* AGP mode can only be -1, 1, 2, 4, 8 */ 882 /* AGP mode can only be -1, 1, 2, 4, 8 */
895 switch (radeon_agpmode) { 883 switch (radeon_agpmode) {
896 case -1: 884 case -1:
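
The radeon_check_arguments() rewrite above replaces the hard-coded switch over accepted sizes with a generic power-of-two test, converts the vram limit to bytes with a 64-bit shift, and computes the GTT size the same way. A standalone sketch of the check (plain C; note that 0 also passes, which suits the vram limit where 0 means "no limit"):

#include <stdbool.h>
#include <stdio.h>

/* True for 0 and for any power of two: a power of two has a single bit set,
 * so clearing its lowest set bit (v & (v - 1)) leaves zero. */
static bool is_power_of_two_or_zero(unsigned int v)
{
    return (v & (v - 1)) == 0;
}

int main(void)
{
    printf("%d %d %d %d\n",
           is_power_of_two_or_zero(0),      /* 1 */
           is_power_of_two_or_zero(256),    /* 1 */
           is_power_of_two_or_zero(96),     /* 0 */
           is_power_of_two_or_zero(4096));  /* 1 */
    return 0;
}
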
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index a7677dd1ce98..4debd60e5aa6 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -355,14 +355,13 @@ int radeon_gart_init(struct radeon_device *rdev)
355 DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n", 355 DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
356 rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages); 356 rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
357 /* Allocate pages table */ 357 /* Allocate pages table */
358 rdev->gart.pages = kzalloc(sizeof(void *) * rdev->gart.num_cpu_pages, 358 rdev->gart.pages = vzalloc(sizeof(void *) * rdev->gart.num_cpu_pages);
359 GFP_KERNEL);
360 if (rdev->gart.pages == NULL) { 359 if (rdev->gart.pages == NULL) {
361 radeon_gart_fini(rdev); 360 radeon_gart_fini(rdev);
362 return -ENOMEM; 361 return -ENOMEM;
363 } 362 }
364 rdev->gart.pages_addr = kzalloc(sizeof(dma_addr_t) * 363 rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
365 rdev->gart.num_cpu_pages, GFP_KERNEL); 364 rdev->gart.num_cpu_pages);
366 if (rdev->gart.pages_addr == NULL) { 365 if (rdev->gart.pages_addr == NULL) {
367 radeon_gart_fini(rdev); 366 radeon_gart_fini(rdev);
368 return -ENOMEM; 367 return -ENOMEM;
@@ -388,8 +387,8 @@ void radeon_gart_fini(struct radeon_device *rdev)
388 radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages); 387 radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
389 } 388 }
390 rdev->gart.ready = false; 389 rdev->gart.ready = false;
391 kfree(rdev->gart.pages); 390 vfree(rdev->gart.pages);
392 kfree(rdev->gart.pages_addr); 391 vfree(rdev->gart.pages_addr);
393 rdev->gart.pages = NULL; 392 rdev->gart.pages = NULL;
394 rdev->gart.pages_addr = NULL; 393 rdev->gart.pages_addr = NULL;
395 394
@@ -577,7 +576,7 @@ void radeon_vm_manager_fini(struct radeon_device *rdev)
577 * 576 *
578 * Global and local mutex must be locked! 577 * Global and local mutex must be locked!
579 */ 578 */
580int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm) 579static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
581{ 580{
582 struct radeon_vm *vm_evict; 581 struct radeon_vm *vm_evict;
583 582
@@ -1036,8 +1035,7 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
1036 pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]); 1035 pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
1037 pte += (addr & mask) * 8; 1036 pte += (addr & mask) * 8;
1038 1037
1039 if (((last_pte + 8 * count) != pte) || 1038 if ((last_pte + 8 * count) != pte) {
1040 ((count + nptes) > 1 << 11)) {
1041 1039
1042 if (count) { 1040 if (count) {
1043 radeon_asic_vm_set_page(rdev, last_pte, 1041 radeon_asic_vm_set_page(rdev, last_pte,
@@ -1148,17 +1146,17 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
1148 1146
1149 if (RADEON_VM_BLOCK_SIZE > 11) 1147 if (RADEON_VM_BLOCK_SIZE > 11)
1150 /* reserve space for one header for every 2k dwords */ 1148 /* reserve space for one header for every 2k dwords */
1151 ndw += (nptes >> 11) * 3; 1149 ndw += (nptes >> 11) * 4;
1152 else 1150 else
1153 /* reserve space for one header for 1151 /* reserve space for one header for
1154 every (1 << BLOCK_SIZE) entries */ 1152 every (1 << BLOCK_SIZE) entries */
1155 ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 3; 1153 ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4;
1156 1154
1157 /* reserve space for pte addresses */ 1155 /* reserve space for pte addresses */
1158 ndw += nptes * 2; 1156 ndw += nptes * 2;
1159 1157
1160 /* reserve space for one header for every 2k dwords */ 1158 /* reserve space for one header for every 2k dwords */
1161 ndw += (npdes >> 11) * 3; 1159 ndw += (npdes >> 11) * 4;
1162 1160
1163 /* reserve space for pde addresses */ 1161 /* reserve space for pde addresses */
1164 ndw += npdes * 2; 1162 ndw += npdes * 2;
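
The radeon_gart_init()/radeon_gart_fini() hunks above switch the per-page bookkeeping arrays from kzalloc()/kfree() to vzalloc()/vfree(). These arrays grow with the GART size (one pointer and one dma_addr_t per CPU page) and can exceed what the page allocator will hand out as physically contiguous memory, while vmalloc space only has to be virtually contiguous. A kernel-style sketch of the allocation choice (not the driver code; alloc_page_table()/free_page_table() are made-up helpers):

#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Sketch only: large, page-count-sized tables only need to be virtually
 * contiguous, so vzalloc() avoids the high-order allocations that kzalloc()
 * would require and that can fail on a fragmented system. */
static void *alloc_page_table(unsigned long num_pages)
{
    return vzalloc(num_pages * sizeof(void *));
}

static void free_page_table(void *table)
{
    vfree(table);    /* like kfree(), vfree(NULL) is a no-op */
}
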
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index f38fbcc46935..fe5c1f6b7957 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -53,6 +53,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
53 struct drm_gem_object **obj) 53 struct drm_gem_object **obj)
54{ 54{
55 struct radeon_bo *robj; 55 struct radeon_bo *robj;
56 unsigned long max_size;
56 int r; 57 int r;
57 58
58 *obj = NULL; 59 *obj = NULL;
@@ -60,11 +61,26 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
60 if (alignment < PAGE_SIZE) { 61 if (alignment < PAGE_SIZE) {
61 alignment = PAGE_SIZE; 62 alignment = PAGE_SIZE;
62 } 63 }
64
65 /* maximum bo size is the minimum between visible vram and gtt size */
66 max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
67 if (size > max_size) {
68 printk(KERN_WARNING "%s:%d alloc size %dMb bigger than %ldMb limit\n",
69 __func__, __LINE__, size >> 20, max_size >> 20);
70 return -ENOMEM;
71 }
72
73retry:
63 r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj); 74 r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
64 if (r) { 75 if (r) {
65 if (r != -ERESTARTSYS) 76 if (r != -ERESTARTSYS) {
77 if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
78 initial_domain |= RADEON_GEM_DOMAIN_GTT;
79 goto retry;
80 }
66 DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n", 81 DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
67 size, initial_domain, alignment, r); 82 size, initial_domain, alignment, r);
83 }
68 return r; 84 return r;
69 } 85 }
70 *obj = &robj->gem_base; 86 *obj = &robj->gem_base;
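
The radeon_gem.c hunk above moves the size cap and the VRAM-to-GTT fallback up into GEM object creation (the radeon_object.c hunk further down removes the same logic from radeon_bo_create()): an allocation larger than the smaller of visible VRAM and GTT is rejected outright, and a failed VRAM allocation, unless interrupted, is retried once with the GTT domain added. A standalone sketch of the retry shape (plain C; the domains and try_alloc() are stand-ins, and -EINTR stands in for the kernel's -ERESTARTSYS):

#include <errno.h>
#include <stdio.h>

#define DOMAIN_VRAM 0x1
#define DOMAIN_GTT  0x2

/* Stand-in allocator: pretend VRAM is full. */
static int try_alloc(unsigned domain)
{
    return (domain & DOMAIN_GTT) ? 0 : -ENOMEM;
}

static int create_object(unsigned domain)
{
    int r;

retry:
    r = try_alloc(domain);
    if (r) {
        if (r != -EINTR && domain == DOMAIN_VRAM) {
            domain |= DOMAIN_GTT;    /* widen to GTT and try again, once */
            goto retry;
        }
        return r;
    }
    return 0;
}

int main(void)
{
    printf("%d\n", create_object(DOMAIN_VRAM));    /* 0: the GTT retry succeeded */
    return 0;
}
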
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index a13ad9d707cf..0063df9d166d 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -370,6 +370,7 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
370 struct backlight_properties props; 370 struct backlight_properties props;
371 struct radeon_backlight_privdata *pdata; 371 struct radeon_backlight_privdata *pdata;
372 uint8_t backlight_level; 372 uint8_t backlight_level;
373 char bl_name[16];
373 374
374 if (!radeon_encoder->enc_priv) 375 if (!radeon_encoder->enc_priv)
375 return; 376 return;
@@ -389,7 +390,9 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
389 memset(&props, 0, sizeof(props)); 390 memset(&props, 0, sizeof(props));
390 props.max_brightness = RADEON_MAX_BL_LEVEL; 391 props.max_brightness = RADEON_MAX_BL_LEVEL;
391 props.type = BACKLIGHT_RAW; 392 props.type = BACKLIGHT_RAW;
392 bd = backlight_device_register("radeon_bl", &drm_connector->kdev, 393 snprintf(bl_name, sizeof(bl_name),
394 "radeon_bl%d", dev->primary->index);
395 bd = backlight_device_register(bl_name, &drm_connector->kdev,
393 pdata, &radeon_backlight_ops, &props); 396 pdata, &radeon_backlight_ops, &props);
394 if (IS_ERR(bd)) { 397 if (IS_ERR(bd)) {
395 DRM_ERROR("Backlight registration failed\n"); 398 DRM_ERROR("Backlight registration failed\n");
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 8b27dd6e3144..b91118ccef86 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -105,7 +105,6 @@ int radeon_bo_create(struct radeon_device *rdev,
105 struct radeon_bo *bo; 105 struct radeon_bo *bo;
106 enum ttm_bo_type type; 106 enum ttm_bo_type type;
107 unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT; 107 unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
108 unsigned long max_size = 0;
109 size_t acc_size; 108 size_t acc_size;
110 int r; 109 int r;
111 110
@@ -121,18 +120,9 @@ int radeon_bo_create(struct radeon_device *rdev,
121 } 120 }
122 *bo_ptr = NULL; 121 *bo_ptr = NULL;
123 122
124 /* maximun bo size is the minimun btw visible vram and gtt size */
125 max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
126 if ((page_align << PAGE_SHIFT) >= max_size) {
127 printk(KERN_WARNING "%s:%d alloc size %ldM bigger than %ldMb limit\n",
128 __func__, __LINE__, page_align >> (20 - PAGE_SHIFT), max_size >> 20);
129 return -ENOMEM;
130 }
131
132 acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size, 123 acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
133 sizeof(struct radeon_bo)); 124 sizeof(struct radeon_bo));
134 125
135retry:
136 bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); 126 bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
137 if (bo == NULL) 127 if (bo == NULL)
138 return -ENOMEM; 128 return -ENOMEM;
@@ -154,15 +144,6 @@ retry:
154 acc_size, sg, &radeon_ttm_bo_destroy); 144 acc_size, sg, &radeon_ttm_bo_destroy);
155 up_read(&rdev->pm.mclk_lock); 145 up_read(&rdev->pm.mclk_lock);
156 if (unlikely(r != 0)) { 146 if (unlikely(r != 0)) {
157 if (r != -ERESTARTSYS) {
158 if (domain == RADEON_GEM_DOMAIN_VRAM) {
159 domain |= RADEON_GEM_DOMAIN_GTT;
160 goto retry;
161 }
162 dev_err(rdev->dev,
163 "object_init failed for (%lu, 0x%08X)\n",
164 size, domain);
165 }
166 return r; 147 return r;
167 } 148 }
168 *bo_ptr = bo; 149 *bo_ptr = bo;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index df8dd7701643..b0db712060fb 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2808,26 +2808,31 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
2808{ 2808{
2809 struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index]; 2809 struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
2810 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags); 2810 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
2811 int i;
2812 uint64_t value;
2813 2811
2814 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 2 + count * 2)); 2812 while (count) {
2815 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 2813 unsigned ndw = 2 + count * 2;
2816 WRITE_DATA_DST_SEL(1))); 2814 if (ndw > 0x3FFE)
2817 radeon_ring_write(ring, pe); 2815 ndw = 0x3FFE;
2818 radeon_ring_write(ring, upper_32_bits(pe)); 2816
2819 for (i = 0; i < count; ++i) { 2817 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw));
2820 if (flags & RADEON_VM_PAGE_SYSTEM) { 2818 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2821 value = radeon_vm_map_gart(rdev, addr); 2819 WRITE_DATA_DST_SEL(1)));
2822 value &= 0xFFFFFFFFFFFFF000ULL; 2820 radeon_ring_write(ring, pe);
2823 } else if (flags & RADEON_VM_PAGE_VALID) 2821 radeon_ring_write(ring, upper_32_bits(pe));
2824 value = addr; 2822 for (; ndw > 2; ndw -= 2, --count, pe += 8) {
2825 else 2823 uint64_t value;
2826 value = 0; 2824 if (flags & RADEON_VM_PAGE_SYSTEM) {
2827 addr += incr; 2825 value = radeon_vm_map_gart(rdev, addr);
2828 value |= r600_flags; 2826 value &= 0xFFFFFFFFFFFFF000ULL;
2829 radeon_ring_write(ring, value); 2827 } else if (flags & RADEON_VM_PAGE_VALID)
2830 radeon_ring_write(ring, upper_32_bits(value)); 2828 value = addr;
2829 else
2830 value = 0;
2831 addr += incr;
2832 value |= r600_flags;
2833 radeon_ring_write(ring, value);
2834 radeon_ring_write(ring, upper_32_bits(value));
2835 }
2831 } 2836 }
2832} 2837}
2833 2838
@@ -2868,6 +2873,10 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
2868 radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); 2873 radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
2869 radeon_ring_write(ring, 0); 2874 radeon_ring_write(ring, 0);
2870 radeon_ring_write(ring, 1 << vm->id); 2875 radeon_ring_write(ring, 1 << vm->id);
2876
2877 /* sync PFP to ME, otherwise we might get invalid PFP reads */
2878 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
2879 radeon_ring_write(ring, 0x0);
2871} 2880}
2872 2881
2873/* 2882/*
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index c71d493fd0c5..1c350fc4e449 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -201,6 +201,8 @@ static int shmob_drm_load(struct drm_device *dev, unsigned long flags)
201 goto done; 201 goto done;
202 } 202 }
203 203
204 platform_set_drvdata(pdev, sdev);
205
204done: 206done:
205 if (ret) 207 if (ret)
206 shmob_drm_unload(dev); 208 shmob_drm_unload(dev);
@@ -299,11 +301,9 @@ static struct drm_driver shmob_drm_driver = {
299#if CONFIG_PM_SLEEP 301#if CONFIG_PM_SLEEP
300static int shmob_drm_pm_suspend(struct device *dev) 302static int shmob_drm_pm_suspend(struct device *dev)
301{ 303{
302 struct platform_device *pdev = to_platform_device(dev); 304 struct shmob_drm_device *sdev = dev_get_drvdata(dev);
303 struct drm_device *ddev = platform_get_drvdata(pdev);
304 struct shmob_drm_device *sdev = ddev->dev_private;
305 305
306 drm_kms_helper_poll_disable(ddev); 306 drm_kms_helper_poll_disable(sdev->ddev);
307 shmob_drm_crtc_suspend(&sdev->crtc); 307 shmob_drm_crtc_suspend(&sdev->crtc);
308 308
309 return 0; 309 return 0;
@@ -311,9 +311,7 @@ static int shmob_drm_pm_suspend(struct device *dev)
311 311
312static int shmob_drm_pm_resume(struct device *dev) 312static int shmob_drm_pm_resume(struct device *dev)
313{ 313{
314 struct platform_device *pdev = to_platform_device(dev); 314 struct shmob_drm_device *sdev = dev_get_drvdata(dev);
315 struct drm_device *ddev = platform_get_drvdata(pdev);
316 struct shmob_drm_device *sdev = ddev->dev_private;
317 315
318 mutex_lock(&sdev->ddev->mode_config.mutex); 316 mutex_lock(&sdev->ddev->mode_config.mutex);
319 shmob_drm_crtc_resume(&sdev->crtc); 317 shmob_drm_crtc_resume(&sdev->crtc);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 402ab69f9f99..bf6e4b5a73b5 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -580,6 +580,7 @@ retry:
580 if (unlikely(ret != 0)) 580 if (unlikely(ret != 0))
581 return ret; 581 return ret;
582 582
583retry_reserve:
583 spin_lock(&glob->lru_lock); 584 spin_lock(&glob->lru_lock);
584 585
585 if (unlikely(list_empty(&bo->ddestroy))) { 586 if (unlikely(list_empty(&bo->ddestroy))) {
@@ -587,14 +588,20 @@ retry:
587 return 0; 588 return 0;
588 } 589 }
589 590
590 ret = ttm_bo_reserve_locked(bo, interruptible, 591 ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
591 no_wait_reserve, false, 0);
592 592
593 if (unlikely(ret != 0)) { 593 if (unlikely(ret == -EBUSY)) {
594 spin_unlock(&glob->lru_lock); 594 spin_unlock(&glob->lru_lock);
595 return ret; 595 if (likely(!no_wait_reserve))
596 ret = ttm_bo_wait_unreserved(bo, interruptible);
597 if (unlikely(ret != 0))
598 return ret;
599
600 goto retry_reserve;
596 } 601 }
597 602
603 BUG_ON(ret != 0);
604
598 /** 605 /**
599 * We can re-check for sync object without taking 606 * We can re-check for sync object without taking
600 * the bo::lock since setting the sync object requires 607 * the bo::lock since setting the sync object requires
@@ -811,17 +818,14 @@ retry:
811 no_wait_reserve, no_wait_gpu); 818 no_wait_reserve, no_wait_gpu);
812 kref_put(&bo->list_kref, ttm_bo_release_list); 819 kref_put(&bo->list_kref, ttm_bo_release_list);
813 820
814 if (likely(ret == 0 || ret == -ERESTARTSYS)) 821 return ret;
815 return ret;
816
817 goto retry;
818 } 822 }
819 823
820 ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0); 824 ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
821 825
822 if (unlikely(ret == -EBUSY)) { 826 if (unlikely(ret == -EBUSY)) {
823 spin_unlock(&glob->lru_lock); 827 spin_unlock(&glob->lru_lock);
824 if (likely(!no_wait_gpu)) 828 if (likely(!no_wait_reserve))
825 ret = ttm_bo_wait_unreserved(bo, interruptible); 829 ret = ttm_bo_wait_unreserved(bo, interruptible);
826 830
827 kref_put(&bo->list_kref, ttm_bo_release_list); 831 kref_put(&bo->list_kref, ttm_bo_release_list);
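
The ttm_bo.c hunks above stop doing a blocking reserve while the LRU spinlock is held: the reserve is attempted in no-wait mode, and on -EBUSY the spinlock is dropped, the caller sleeps until the buffer is unreserved, and the sequence is retried from the top. A standalone sketch of the drop-the-lock-and-retry pattern (plain C; lru_lock(), try_reserve() and wait_unreserved() are stand-ins for the spinlock and TTM primitives):

#include <errno.h>
#include <stdbool.h>

static void lru_lock(void)              { }
static void lru_unlock(void)            { }
static int  try_reserve(void)           { return 0; }    /* returns 0 or -EBUSY */
static int  wait_unreserved(bool intr)  { (void)intr; return 0; }

/* Never block on a reservation while holding the LRU lock: try it, and if the
 * buffer is busy, drop the lock, sleep until it is unreserved, then retry. */
static int reserve_with_retry(bool interruptible, bool no_wait)
{
    int ret;

retry:
    lru_lock();
    ret = try_reserve();
    if (ret == -EBUSY) {
        lru_unlock();
        if (no_wait)
            return -EBUSY;
        ret = wait_unreserved(interruptible);
        if (ret)
            return ret;
        goto retry;
    }
    /* ... work on the reserved buffer ... */
    lru_unlock();
    return ret;
}

int main(void)
{
    return reserve_with_retry(true, false);
}
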
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 6ae2ac47c9c8..f0f8928b3c8a 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -292,7 +292,6 @@ static int evdev_release(struct inode *inode, struct file *file)
292 kfree(client); 292 kfree(client);
293 293
294 evdev_close_device(evdev); 294 evdev_close_device(evdev);
295 put_device(&evdev->dev);
296 295
297 return 0; 296 return 0;
298} 297}
@@ -331,7 +330,6 @@ static int evdev_open(struct inode *inode, struct file *file)
331 file->private_data = client; 330 file->private_data = client;
332 nonseekable_open(inode, file); 331 nonseekable_open(inode, file);
333 332
334 get_device(&evdev->dev);
335 return 0; 333 return 0;
336 334
337 err_free_client: 335 err_free_client:
@@ -1001,6 +999,7 @@ static int evdev_connect(struct input_handler *handler, struct input_dev *dev,
1001 goto err_free_evdev; 999 goto err_free_evdev;
1002 1000
1003 cdev_init(&evdev->cdev, &evdev_fops); 1001 cdev_init(&evdev->cdev, &evdev_fops);
1002 evdev->cdev.kobj.parent = &evdev->dev.kobj;
1004 error = cdev_add(&evdev->cdev, evdev->dev.devt, 1); 1003 error = cdev_add(&evdev->cdev, evdev->dev.devt, 1);
1005 if (error) 1004 if (error)
1006 goto err_unregister_handle; 1005 goto err_unregister_handle;
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index b62b5891f399..f362883c94e3 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -243,7 +243,6 @@ static int joydev_release(struct inode *inode, struct file *file)
243 kfree(client); 243 kfree(client);
244 244
245 joydev_close_device(joydev); 245 joydev_close_device(joydev);
246 put_device(&joydev->dev);
247 246
248 return 0; 247 return 0;
249} 248}
@@ -270,7 +269,6 @@ static int joydev_open(struct inode *inode, struct file *file)
270 file->private_data = client; 269 file->private_data = client;
271 nonseekable_open(inode, file); 270 nonseekable_open(inode, file);
272 271
273 get_device(&joydev->dev);
274 return 0; 272 return 0;
275 273
276 err_free_client: 274 err_free_client:
@@ -858,6 +856,7 @@ static int joydev_connect(struct input_handler *handler, struct input_dev *dev,
858 goto err_free_joydev; 856 goto err_free_joydev;
859 857
860 cdev_init(&joydev->cdev, &joydev_fops); 858 cdev_init(&joydev->cdev, &joydev_fops);
859 joydev->cdev.kobj.parent = &joydev->dev.kobj;
861 error = cdev_add(&joydev->cdev, joydev->dev.devt, 1); 860 error = cdev_add(&joydev->cdev, joydev->dev.devt, 1);
862 if (error) 861 if (error)
863 goto err_unregister_handle; 862 goto err_unregister_handle;
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index a1b4c37956b2..8f02e3d0e712 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -523,7 +523,6 @@ static int mousedev_release(struct inode *inode, struct file *file)
523 kfree(client); 523 kfree(client);
524 524
525 mousedev_close_device(mousedev); 525 mousedev_close_device(mousedev);
526 put_device(&mousedev->dev);
527 526
528 return 0; 527 return 0;
529} 528}
@@ -558,7 +557,6 @@ static int mousedev_open(struct inode *inode, struct file *file)
558 file->private_data = client; 557 file->private_data = client;
559 nonseekable_open(inode, file); 558 nonseekable_open(inode, file);
560 559
561 get_device(&mousedev->dev);
562 return 0; 560 return 0;
563 561
564 err_free_client: 562 err_free_client:
@@ -892,6 +890,7 @@ static struct mousedev *mousedev_create(struct input_dev *dev,
892 } 890 }
893 891
894 cdev_init(&mousedev->cdev, &mousedev_fops); 892 cdev_init(&mousedev->cdev, &mousedev_fops);
893 mousedev->cdev.kobj.parent = &mousedev->dev.kobj;
895 error = cdev_add(&mousedev->cdev, mousedev->dev.devt, 1); 894 error = cdev_add(&mousedev->cdev, mousedev->dev.devt, 1);
896 if (error) 895 if (error)
897 goto err_unregister_handle; 896 goto err_unregister_handle;
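
The evdev, joydev and mousedev hunks above all make the same change: instead of taking a device reference in every open() and dropping it in release(), the character device's kobject is parented to the input device's kobject before cdev_add(), so the cdev core itself keeps the parent alive for the lifetime of the cdev and of any open file. A kernel-style sketch of the registration order (my_dev and my_fops are placeholders; the cdev fields are the ones used in the hunks):

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>

struct my_dev {
    struct device dev;    /* embedded, refcounted device */
    struct cdev cdev;     /* character device exposed to userspace */
};

static const struct file_operations my_fops;    /* placeholder fops */

static int my_register(struct my_dev *d, dev_t devt)
{
    cdev_init(&d->cdev, &my_fops);
    d->cdev.kobj.parent = &d->dev.kobj;    /* the cdev pins the device, not each open() */
    return cdev_add(&d->cdev, devt, 1);
}
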
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index 9edf9806cff9..2c1e12bf2ab4 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -391,7 +391,7 @@ static int wacom_parse_hid(struct usb_interface *intf,
391 features->pktlen = WACOM_PKGLEN_TPC2FG; 391 features->pktlen = WACOM_PKGLEN_TPC2FG;
392 } 392 }
393 393
394 if (features->type == MTSCREEN) 394 if (features->type == MTSCREEN || features->type == WACOM_24HDT)
395 features->pktlen = WACOM_PKGLEN_MTOUCH; 395 features->pktlen = WACOM_PKGLEN_MTOUCH;
396 396
397 if (features->type == BAMBOO_PT) { 397 if (features->type == BAMBOO_PT) {
@@ -402,6 +402,14 @@ static int wacom_parse_hid(struct usb_interface *intf,
402 features->x_max = 402 features->x_max =
403 get_unaligned_le16(&report[i + 8]); 403 get_unaligned_le16(&report[i + 8]);
404 i += 15; 404 i += 15;
405 } else if (features->type == WACOM_24HDT) {
406 features->x_max =
407 get_unaligned_le16(&report[i + 3]);
408 features->x_phy =
409 get_unaligned_le16(&report[i + 8]);
410 features->unit = report[i - 1];
411 features->unitExpo = report[i - 3];
412 i += 12;
405 } else { 413 } else {
406 features->x_max = 414 features->x_max =
407 get_unaligned_le16(&report[i + 3]); 415 get_unaligned_le16(&report[i + 3]);
@@ -434,6 +442,12 @@ static int wacom_parse_hid(struct usb_interface *intf,
434 features->y_phy = 442 features->y_phy =
435 get_unaligned_le16(&report[i + 6]); 443 get_unaligned_le16(&report[i + 6]);
436 i += 7; 444 i += 7;
445 } else if (type == WACOM_24HDT) {
446 features->y_max =
447 get_unaligned_le16(&report[i + 3]);
448 features->y_phy =
449 get_unaligned_le16(&report[i - 2]);
450 i += 7;
437 } else if (type == BAMBOO_PT) { 451 } else if (type == BAMBOO_PT) {
438 features->y_phy = 452 features->y_phy =
439 get_unaligned_le16(&report[i + 3]); 453 get_unaligned_le16(&report[i + 3]);
@@ -541,6 +555,9 @@ static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_feat
541 /* MT Tablet PC touch */ 555 /* MT Tablet PC touch */
542 return wacom_set_device_mode(intf, 3, 4, 4); 556 return wacom_set_device_mode(intf, 3, 4, 4);
543 } 557 }
558 else if (features->type == WACOM_24HDT) {
559 return wacom_set_device_mode(intf, 18, 3, 2);
560 }
544 } else if (features->device_type == BTN_TOOL_PEN) { 561 } else if (features->device_type == BTN_TOOL_PEN) {
545 if (features->type <= BAMBOO_PT && features->type != WIRELESS) { 562 if (features->type <= BAMBOO_PT && features->type != WIRELESS) {
546 return wacom_set_device_mode(intf, 2, 2, 2); 563 return wacom_set_device_mode(intf, 2, 2, 2);
@@ -613,6 +630,30 @@ struct wacom_usbdev_data {
613static LIST_HEAD(wacom_udev_list); 630static LIST_HEAD(wacom_udev_list);
614static DEFINE_MUTEX(wacom_udev_list_lock); 631static DEFINE_MUTEX(wacom_udev_list_lock);
615 632
633static struct usb_device *wacom_get_sibling(struct usb_device *dev, int vendor, int product)
634{
635 int port1;
636 struct usb_device *sibling;
637
638 if (vendor == 0 && product == 0)
639 return dev;
640
641 if (dev->parent == NULL)
642 return NULL;
643
644 usb_hub_for_each_child(dev->parent, port1, sibling) {
645 struct usb_device_descriptor *d;
646 if (sibling == NULL)
647 continue;
648
649 d = &sibling->descriptor;
650 if (d->idVendor == vendor && d->idProduct == product)
651 return sibling;
652 }
653
654 return NULL;
655}
656
616static struct wacom_usbdev_data *wacom_get_usbdev_data(struct usb_device *dev) 657static struct wacom_usbdev_data *wacom_get_usbdev_data(struct usb_device *dev)
617{ 658{
618 struct wacom_usbdev_data *data; 659 struct wacom_usbdev_data *data;
@@ -1257,13 +1298,19 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
1257 strlcpy(wacom_wac->name, features->name, sizeof(wacom_wac->name)); 1298 strlcpy(wacom_wac->name, features->name, sizeof(wacom_wac->name));
1258 1299
1259 if (features->quirks & WACOM_QUIRK_MULTI_INPUT) { 1300 if (features->quirks & WACOM_QUIRK_MULTI_INPUT) {
1301 struct usb_device *other_dev;
1302
1260 /* Append the device type to the name */ 1303 /* Append the device type to the name */
1261 strlcat(wacom_wac->name, 1304 strlcat(wacom_wac->name,
1262 features->device_type == BTN_TOOL_PEN ? 1305 features->device_type == BTN_TOOL_PEN ?
1263 " Pen" : " Finger", 1306 " Pen" : " Finger",
1264 sizeof(wacom_wac->name)); 1307 sizeof(wacom_wac->name));
1265 1308
1266 error = wacom_add_shared_data(wacom_wac, dev); 1309
1310 other_dev = wacom_get_sibling(dev, features->oVid, features->oPid);
1311 if (other_dev == NULL || wacom_get_usbdev_data(other_dev) == NULL)
1312 other_dev = dev;
1313 error = wacom_add_shared_data(wacom_wac, other_dev);
1267 if (error) 1314 if (error)
1268 goto fail3; 1315 goto fail3;
1269 } 1316 }
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index c3468c8dbd89..aa6010131179 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -806,6 +806,70 @@ static int find_slot_from_contactid(struct wacom_wac *wacom, int contactid)
806 return -1; 806 return -1;
807} 807}
808 808
809static int int_dist(int x1, int y1, int x2, int y2)
810{
811 int x = x2 - x1;
812 int y = y2 - y1;
813
814 return int_sqrt(x*x + y*y);
815}
816
817static int wacom_24hdt_irq(struct wacom_wac *wacom)
818{
819 struct input_dev *input = wacom->input;
820 char *data = wacom->data;
821 int i;
822 int current_num_contacts = data[61];
823 int contacts_to_send = 0;
824
825 /*
826 * First packet resets the counter since only the first
827 * packet in series will have non-zero current_num_contacts.
828 */
829 if (current_num_contacts)
830 wacom->num_contacts_left = current_num_contacts;
831
832 /* There are at most 4 contacts per packet */
833 contacts_to_send = min(4, wacom->num_contacts_left);
834
835 for (i = 0; i < contacts_to_send; i++) {
836 int offset = (WACOM_BYTES_PER_24HDT_PACKET * i) + 1;
837 bool touch = data[offset] & 0x1 && !wacom->shared->stylus_in_proximity;
838 int id = data[offset + 1];
839 int slot = find_slot_from_contactid(wacom, id);
840
841 if (slot < 0)
842 continue;
843 input_mt_slot(input, slot);
844 input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
845
846 if (touch) {
847 int t_x = le16_to_cpup((__le16 *)&data[offset + 2]);
848 int c_x = le16_to_cpup((__le16 *)&data[offset + 4]);
849 int t_y = le16_to_cpup((__le16 *)&data[offset + 6]);
850 int c_y = le16_to_cpup((__le16 *)&data[offset + 8]);
851 int w = le16_to_cpup((__le16 *)&data[offset + 10]);
852 int h = le16_to_cpup((__le16 *)&data[offset + 12]);
853
854 input_report_abs(input, ABS_MT_POSITION_X, t_x);
855 input_report_abs(input, ABS_MT_POSITION_Y, t_y);
856 input_report_abs(input, ABS_MT_TOUCH_MAJOR, min(w,h));
857 input_report_abs(input, ABS_MT_WIDTH_MAJOR, min(w, h) + int_dist(t_x, t_y, c_x, c_y));
858 input_report_abs(input, ABS_MT_WIDTH_MINOR, min(w, h));
859 input_report_abs(input, ABS_MT_ORIENTATION, w > h);
860 }
861 wacom->slots[slot] = touch ? id : -1;
862 }
863
864 input_mt_report_pointer_emulation(input, true);
865
866 wacom->num_contacts_left -= contacts_to_send;
867 if (wacom->num_contacts_left <= 0)
868 wacom->num_contacts_left = 0;
869
870 return 1;
871}
872
809static int wacom_mt_touch(struct wacom_wac *wacom) 873static int wacom_mt_touch(struct wacom_wac *wacom)
810{ 874{
811 struct input_dev *input = wacom->input; 875 struct input_dev *input = wacom->input;
@@ -1255,6 +1319,10 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
1255 sync = wacom_intuos_irq(wacom_wac); 1319 sync = wacom_intuos_irq(wacom_wac);
1256 break; 1320 break;
1257 1321
1322 case WACOM_24HDT:
1323 sync = wacom_24hdt_irq(wacom_wac);
1324 break;
1325
1258 case INTUOS5S: 1326 case INTUOS5S:
1259 case INTUOS5: 1327 case INTUOS5:
1260 case INTUOS5L: 1328 case INTUOS5L:
@@ -1340,7 +1408,8 @@ void wacom_setup_device_quirks(struct wacom_features *features)
1340 1408
1341 /* these device have multiple inputs */ 1409 /* these device have multiple inputs */
1342 if (features->type >= WIRELESS || 1410 if (features->type >= WIRELESS ||
1343 (features->type >= INTUOS5S && features->type <= INTUOS5L)) 1411 (features->type >= INTUOS5S && features->type <= INTUOS5L) ||
1412 (features->oVid && features->oPid))
1344 features->quirks |= WACOM_QUIRK_MULTI_INPUT; 1413 features->quirks |= WACOM_QUIRK_MULTI_INPUT;
1345 1414
1346 /* quirk for bamboo touch with 2 low res touches */ 1415 /* quirk for bamboo touch with 2 low res touches */
@@ -1575,6 +1644,15 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
1575 __set_bit(INPUT_PROP_POINTER, input_dev->propbit); 1644 __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
1576 break; 1645 break;
1577 1646
1647 case WACOM_24HDT:
1648 if (features->device_type == BTN_TOOL_FINGER) {
1649 input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, features->x_max, 0, 0);
1650 input_set_abs_params(input_dev, ABS_MT_WIDTH_MAJOR, 0, features->x_max, 0, 0);
1651 input_set_abs_params(input_dev, ABS_MT_WIDTH_MINOR, 0, features->y_max, 0, 0);
1652 input_set_abs_params(input_dev, ABS_MT_ORIENTATION, 0, 1, 0, 0);
1653 }
1654 /* fall through */
1655
1578 case MTSCREEN: 1656 case MTSCREEN:
1579 if (features->device_type == BTN_TOOL_FINGER) { 1657 if (features->device_type == BTN_TOOL_FINGER) {
1580 wacom_wac->slots = kmalloc(features->touch_max * 1658 wacom_wac->slots = kmalloc(features->touch_max *
@@ -1869,8 +1947,11 @@ static const struct wacom_features wacom_features_0xF4 =
1869 { "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047, 1947 { "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047,
1870 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; 1948 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
1871static const struct wacom_features wacom_features_0xF8 = 1949static const struct wacom_features wacom_features_0xF8 =
1872 { "Wacom Cintiq 24HD touch", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047, 1950 { "Wacom Cintiq 24HD touch", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047, /* Pen */
1873 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; 1951 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf6 };
1952static const struct wacom_features wacom_features_0xF6 =
1953 { "Wacom Cintiq 24HD touch", .type = WACOM_24HDT, /* Touch */
1954 .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf8, .touch_max = 10 };
1874static const struct wacom_features wacom_features_0x3F = 1955static const struct wacom_features wacom_features_0x3F =
1875 { "Wacom Cintiq 21UX", WACOM_PKGLEN_INTUOS, 87200, 65600, 1023, 1956 { "Wacom Cintiq 21UX", WACOM_PKGLEN_INTUOS, 87200, 65600, 1023,
1876 63, CINTIQ, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; 1957 63, CINTIQ, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
@@ -2113,6 +2194,7 @@ const struct usb_device_id wacom_ids[] = {
2113 { USB_DEVICE_WACOM(0x47) }, 2194 { USB_DEVICE_WACOM(0x47) },
2114 { USB_DEVICE_WACOM(0xF4) }, 2195 { USB_DEVICE_WACOM(0xF4) },
2115 { USB_DEVICE_WACOM(0xF8) }, 2196 { USB_DEVICE_WACOM(0xF8) },
2197 { USB_DEVICE_WACOM(0xF6) },
2116 { USB_DEVICE_WACOM(0xFA) }, 2198 { USB_DEVICE_WACOM(0xFA) },
2117 { USB_DEVICE_LENOVO(0x6004) }, 2199 { USB_DEVICE_LENOVO(0x6004) },
2118 { } 2200 { }
diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
index 96c185cc301e..345f1e76975e 100644
--- a/drivers/input/tablet/wacom_wac.h
+++ b/drivers/input/tablet/wacom_wac.h
@@ -29,6 +29,7 @@
29 29
30/* wacom data size per MT contact */ 30/* wacom data size per MT contact */
31#define WACOM_BYTES_PER_MT_PACKET 11 31#define WACOM_BYTES_PER_MT_PACKET 11
32#define WACOM_BYTES_PER_24HDT_PACKET 14
32 33
33/* device IDs */ 34/* device IDs */
34#define STYLUS_DEVICE_ID 0x02 35#define STYLUS_DEVICE_ID 0x02
@@ -49,6 +50,7 @@
49#define WACOM_REPORT_TPCHID 15 50#define WACOM_REPORT_TPCHID 15
50#define WACOM_REPORT_TPCST 16 51#define WACOM_REPORT_TPCST 16
51#define WACOM_REPORT_TPC1FGE 18 52#define WACOM_REPORT_TPC1FGE 18
53#define WACOM_REPORT_24HDT 1
52 54
53/* device quirks */ 55/* device quirks */
54#define WACOM_QUIRK_MULTI_INPUT 0x0001 56#define WACOM_QUIRK_MULTI_INPUT 0x0001
@@ -81,6 +83,7 @@ enum {
81 WACOM_MO, 83 WACOM_MO,
82 WIRELESS, 84 WIRELESS,
83 BAMBOO_PT, 85 BAMBOO_PT,
86 WACOM_24HDT,
84 TABLETPC, /* add new TPC below */ 87 TABLETPC, /* add new TPC below */
85 TABLETPCE, 88 TABLETPCE,
86 TABLETPC2FG, 89 TABLETPC2FG,
@@ -109,6 +112,8 @@ struct wacom_features {
109 int distance_fuzz; 112 int distance_fuzz;
110 unsigned quirks; 113 unsigned quirks;
111 unsigned touch_max; 114 unsigned touch_max;
115 int oVid;
116 int oPid;
112}; 117};
113 118
114struct wacom_shared { 119struct wacom_shared {
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 18b0d99bd4d6..81837b0710a9 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1599,21 +1599,46 @@ static void __init free_on_init_error(void)
1599#endif 1599#endif
1600} 1600}
1601 1601
1602/* SB IOAPIC is always on this device in AMD systems */
1603#define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0))
1604
1602static bool __init check_ioapic_information(void) 1605static bool __init check_ioapic_information(void)
1603{ 1606{
1607 bool ret, has_sb_ioapic;
1604 int idx; 1608 int idx;
1605 1609
1606 for (idx = 0; idx < nr_ioapics; idx++) { 1610 has_sb_ioapic = false;
1607 int id = mpc_ioapic_id(idx); 1611 ret = false;
1608 1612
1609 if (get_ioapic_devid(id) < 0) { 1613 for (idx = 0; idx < nr_ioapics; idx++) {
1610 pr_err(FW_BUG "AMD-Vi: IO-APIC[%d] not in IVRS table\n", id); 1614 int devid, id = mpc_ioapic_id(idx);
1611 pr_err("AMD-Vi: Disabling interrupt remapping due to BIOS Bug\n"); 1615
1612 return false; 1616 devid = get_ioapic_devid(id);
1617 if (devid < 0) {
1618 pr_err(FW_BUG "AMD-Vi: IOAPIC[%d] not in IVRS table\n", id);
1619 ret = false;
1620 } else if (devid == IOAPIC_SB_DEVID) {
1621 has_sb_ioapic = true;
1622 ret = true;
1613 } 1623 }
1614 } 1624 }
1615 1625
1616 return true; 1626 if (!has_sb_ioapic) {
1627 /*
1628 * We expect the SB IOAPIC to be listed in the IVRS
1629 * table. The system timer is connected to the SB IOAPIC
1630 * and if we don't have it in the list the system will
1631 * panic at boot time. This situation usually happens
1632 * when the BIOS is buggy and provides us the wrong
1633 * device id for the IOAPIC in the system.
1634 */
1635 pr_err(FW_BUG "AMD-Vi: No southbridge IOAPIC found in IVRS table\n");
1636 }
1637
1638 if (!ret)
1639 pr_err("AMD-Vi: Disabling interrupt remapping due to BIOS Bug(s)\n");
1640
1641 return ret;
1617} 1642}
1618 1643
1619static void __init free_dma_resources(void) 1644static void __init free_dma_resources(void)
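
The amd_iommu_init.c hunk above strengthens the IVRS sanity check: every IOAPIC reported by the firmware must have a device ID in the IVRS table, and the southbridge IOAPIC (bus 0, slot 0x14, function 0), which the system timer is routed through, must be among them; otherwise interrupt remapping is disabled instead of risking a panic at boot. A standalone sketch of how that device ID is composed (plain C; the macros mirror the ones in the hunk):

#include <stdio.h>

/* devfn packs the 5-bit slot and the 3-bit function: devfn = slot << 3 | func. */
#define PCI_DEVFN(slot, func)  ((((slot) & 0x1f) << 3) | ((func) & 0x07))

/* 16-bit IOMMU device id: bus in the high byte, devfn in the low byte. */
#define IOAPIC_SB_DEVID        ((0x00 << 8) | PCI_DEVFN(0x14, 0))

int main(void)
{
    printf("SB IOAPIC devid = 0x%04x\n", IOAPIC_SB_DEVID);    /* prints 0x00a0 */
    return 0;
}
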
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 0b4d62e0c645..a649f146d17b 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -200,7 +200,7 @@ enum {
200 200
201#define SMMU_ADDR_TO_PFN(addr) ((addr) >> 12) 201#define SMMU_ADDR_TO_PFN(addr) ((addr) >> 12)
202#define SMMU_ADDR_TO_PDN(addr) ((addr) >> 22) 202#define SMMU_ADDR_TO_PDN(addr) ((addr) >> 22)
203#define SMMU_PDN_TO_ADDR(addr) ((pdn) << 22) 203#define SMMU_PDN_TO_ADDR(pdn) ((pdn) << 22)
204 204
205#define _READABLE (1 << SMMU_PTB_DATA_ASID_READABLE_SHIFT) 205#define _READABLE (1 << SMMU_PTB_DATA_ASID_READABLE_SHIFT)
206#define _WRITABLE (1 << SMMU_PTB_DATA_ASID_WRITABLE_SHIFT) 206#define _WRITABLE (1 << SMMU_PTB_DATA_ASID_WRITABLE_SHIFT)
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 0f1ec9e8ff14..2e39c04fc16b 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1061,8 +1061,10 @@ static int pinctrl_groups_show(struct seq_file *s, void *what)
1061 seq_printf(s, "group: %s\n", gname); 1061 seq_printf(s, "group: %s\n", gname);
1062 for (i = 0; i < num_pins; i++) { 1062 for (i = 0; i < num_pins; i++) {
1063 pname = pin_get_name(pctldev, pins[i]); 1063 pname = pin_get_name(pctldev, pins[i]);
1064 if (WARN_ON(!pname)) 1064 if (WARN_ON(!pname)) {
1065 mutex_unlock(&pinctrl_mutex);
1065 return -EINVAL; 1066 return -EINVAL;
1067 }
1066 seq_printf(s, "pin %d (%s)\n", pins[i], pname); 1068 seq_printf(s, "pin %d (%s)\n", pins[i], pname);
1067 } 1069 }
1068 seq_puts(s, "\n"); 1070 seq_puts(s, "\n");
diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c
index 43f474cdc110..baee2cc46a17 100644
--- a/drivers/pinctrl/pinconf.c
+++ b/drivers/pinctrl/pinconf.c
@@ -537,8 +537,6 @@ static int pinconf_groups_show(struct seq_file *s, void *what)
537 seq_puts(s, "Pin config settings per pin group\n"); 537 seq_puts(s, "Pin config settings per pin group\n");
538 seq_puts(s, "Format: group (name): configs\n"); 538 seq_puts(s, "Format: group (name): configs\n");
539 539
540 mutex_lock(&pinctrl_mutex);
541
542 while (selector < ngroups) { 540 while (selector < ngroups) {
543 const char *gname = pctlops->get_group_name(pctldev, selector); 541 const char *gname = pctlops->get_group_name(pctldev, selector);
544 542
@@ -549,8 +547,6 @@ static int pinconf_groups_show(struct seq_file *s, void *what)
549 selector++; 547 selector++;
550 } 548 }
551 549
552 mutex_unlock(&pinctrl_mutex);
553
554 return 0; 550 return 0;
555} 551}
556 552
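The two pinctrl hunks above rebalance the debugfs locking: core.c now releases pinctrl_mutex before its early return, and pinconf.c stops taking the mutex itself because its caller already holds it. The sketch below is a minimal, hypothetical illustration of the rule being restored (every exit path drops the lock it took); it uses plain pthreads and made-up names such as show_groups() and table_lock, not the kernel API. Build with gcc -pthread; the NULL entry forces the early-return path that the original core.c code left locked.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
/* The NULL in the middle triggers the error path on purpose. */
static const char *groups[] = { "uart0", NULL, "i2c1" };

static int show_groups(void)
{
	pthread_mutex_lock(&table_lock);

	for (int i = 0; i < 3; i++) {
		if (!groups[i]) {
			/* Early return: drop the lock first, as the fix does. */
			pthread_mutex_unlock(&table_lock);
			return -1;
		}
		printf("group: %s\n", groups[i]);
	}

	pthread_mutex_unlock(&table_lock);
	return 0;
}

int main(void)
{
	return show_groups() == 0 ? 0 : 1;
}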
diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/pinctrl-nomadik.c
index 01aea1c3b5fa..cf82d9ce4dee 100644
--- a/drivers/pinctrl/pinctrl-nomadik.c
+++ b/drivers/pinctrl/pinctrl-nomadik.c
@@ -1056,7 +1056,7 @@ static int nmk_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
1056 struct nmk_gpio_chip *nmk_chip = 1056 struct nmk_gpio_chip *nmk_chip =
1057 container_of(chip, struct nmk_gpio_chip, chip); 1057 container_of(chip, struct nmk_gpio_chip, chip);
1058 1058
1059 return irq_find_mapping(nmk_chip->domain, offset); 1059 return irq_create_mapping(nmk_chip->domain, offset);
1060} 1060}
1061 1061
1062#ifdef CONFIG_DEBUG_FS 1062#ifdef CONFIG_DEBUG_FS
@@ -1281,7 +1281,7 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
1281 struct clk *clk; 1281 struct clk *clk;
1282 int secondary_irq; 1282 int secondary_irq;
1283 void __iomem *base; 1283 void __iomem *base;
1284 int irq_start = -1; 1284 int irq_start = 0;
1285 int irq; 1285 int irq;
1286 int ret; 1286 int ret;
1287 1287
@@ -1387,7 +1387,7 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
1387 1387
1388 if (!np) 1388 if (!np)
1389 irq_start = NOMADIK_GPIO_TO_IRQ(pdata->first_gpio); 1389 irq_start = NOMADIK_GPIO_TO_IRQ(pdata->first_gpio);
1390 nmk_chip->domain = irq_domain_add_simple(NULL, 1390 nmk_chip->domain = irq_domain_add_simple(np,
1391 NMK_GPIO_PER_CHIP, irq_start, 1391 NMK_GPIO_PER_CHIP, irq_start,
1392 &nmk_gpio_irq_simple_ops, nmk_chip); 1392 &nmk_gpio_irq_simple_ops, nmk_chip);
1393 if (!nmk_chip->domain) { 1393 if (!nmk_chip->domain) {
diff --git a/drivers/pinctrl/pinctrl-tegra.c b/drivers/pinctrl/pinctrl-tegra.c
index 729b686c3ad2..7da0b371fd65 100644
--- a/drivers/pinctrl/pinctrl-tegra.c
+++ b/drivers/pinctrl/pinctrl-tegra.c
@@ -464,7 +464,7 @@ static int tegra_pinconf_reg(struct tegra_pmx *pmx,
464 *bank = g->drv_bank; 464 *bank = g->drv_bank;
465 *reg = g->drv_reg; 465 *reg = g->drv_reg;
466 *bit = g->lpmd_bit; 466 *bit = g->lpmd_bit;
467 *width = 1; 467 *width = 2;
468 break; 468 break;
469 case TEGRA_PINCONF_PARAM_DRIVE_DOWN_STRENGTH: 469 case TEGRA_PINCONF_PARAM_DRIVE_DOWN_STRENGTH:
470 *bank = g->drv_bank; 470 *bank = g->drv_bank;
diff --git a/drivers/pinctrl/pinctrl-tegra30.c b/drivers/pinctrl/pinctrl-tegra30.c
index 0386fdf0da16..7894f14c7059 100644
--- a/drivers/pinctrl/pinctrl-tegra30.c
+++ b/drivers/pinctrl/pinctrl-tegra30.c
@@ -3345,10 +3345,10 @@ static const struct tegra_function tegra30_functions[] = {
3345 FUNCTION(vi_alt3), 3345 FUNCTION(vi_alt3),
3346}; 3346};
3347 3347
3348#define MUXCTL_REG_A 0x3000 3348#define DRV_PINGROUP_REG_A 0x868 /* bank 0 */
3349#define PINGROUP_REG_A 0x868 3349#define PINGROUP_REG_A 0x3000 /* bank 1 */
3350 3350
3351#define PINGROUP_REG_Y(r) ((r) - MUXCTL_REG_A) 3351#define PINGROUP_REG_Y(r) ((r) - PINGROUP_REG_A)
3352#define PINGROUP_REG_N(r) -1 3352#define PINGROUP_REG_N(r) -1
3353 3353
3354#define PINGROUP(pg_name, f0, f1, f2, f3, f_safe, r, od, ior) \ 3354#define PINGROUP(pg_name, f0, f1, f2, f3, f_safe, r, od, ior) \
@@ -3364,25 +3364,25 @@ static const struct tegra_function tegra30_functions[] = {
3364 }, \ 3364 }, \
3365 .func_safe = TEGRA_MUX_ ## f_safe, \ 3365 .func_safe = TEGRA_MUX_ ## f_safe, \
3366 .mux_reg = PINGROUP_REG_Y(r), \ 3366 .mux_reg = PINGROUP_REG_Y(r), \
3367 .mux_bank = 0, \ 3367 .mux_bank = 1, \
3368 .mux_bit = 0, \ 3368 .mux_bit = 0, \
3369 .pupd_reg = PINGROUP_REG_Y(r), \ 3369 .pupd_reg = PINGROUP_REG_Y(r), \
3370 .pupd_bank = 0, \ 3370 .pupd_bank = 1, \
3371 .pupd_bit = 2, \ 3371 .pupd_bit = 2, \
3372 .tri_reg = PINGROUP_REG_Y(r), \ 3372 .tri_reg = PINGROUP_REG_Y(r), \
3373 .tri_bank = 0, \ 3373 .tri_bank = 1, \
3374 .tri_bit = 4, \ 3374 .tri_bit = 4, \
3375 .einput_reg = PINGROUP_REG_Y(r), \ 3375 .einput_reg = PINGROUP_REG_Y(r), \
3376 .einput_bank = 0, \ 3376 .einput_bank = 1, \
3377 .einput_bit = 5, \ 3377 .einput_bit = 5, \
3378 .odrain_reg = PINGROUP_REG_##od(r), \ 3378 .odrain_reg = PINGROUP_REG_##od(r), \
3379 .odrain_bank = 0, \ 3379 .odrain_bank = 1, \
3380 .odrain_bit = 6, \ 3380 .odrain_bit = 6, \
3381 .lock_reg = PINGROUP_REG_Y(r), \ 3381 .lock_reg = PINGROUP_REG_Y(r), \
3382 .lock_bank = 0, \ 3382 .lock_bank = 1, \
3383 .lock_bit = 7, \ 3383 .lock_bit = 7, \
3384 .ioreset_reg = PINGROUP_REG_##ior(r), \ 3384 .ioreset_reg = PINGROUP_REG_##ior(r), \
3385 .ioreset_bank = 0, \ 3385 .ioreset_bank = 1, \
3386 .ioreset_bit = 8, \ 3386 .ioreset_bit = 8, \
3387 .drv_reg = -1, \ 3387 .drv_reg = -1, \
3388 } 3388 }
@@ -3401,8 +3401,8 @@ static const struct tegra_function tegra30_functions[] = {
3401 .odrain_reg = -1, \ 3401 .odrain_reg = -1, \
3402 .lock_reg = -1, \ 3402 .lock_reg = -1, \
3403 .ioreset_reg = -1, \ 3403 .ioreset_reg = -1, \
3404 .drv_reg = ((r) - PINGROUP_REG_A), \ 3404 .drv_reg = ((r) - DRV_PINGROUP_REG_A), \
3405 .drv_bank = 1, \ 3405 .drv_bank = 0, \
3406 .hsm_bit = hsm_b, \ 3406 .hsm_bit = hsm_b, \
3407 .schmitt_bit = schmitt_b, \ 3407 .schmitt_bit = schmitt_b, \
3408 .lpmd_bit = lpmd_b, \ 3408 .lpmd_bit = lpmd_b, \
diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c
index 891cd6c61d0a..4eed51044c5d 100644
--- a/drivers/rtc/rtc-imxdi.c
+++ b/drivers/rtc/rtc-imxdi.c
@@ -392,6 +392,8 @@ static int dryice_rtc_probe(struct platform_device *pdev)
392 if (imxdi->ioaddr == NULL) 392 if (imxdi->ioaddr == NULL)
393 return -ENOMEM; 393 return -ENOMEM;
394 394
395 spin_lock_init(&imxdi->irq_lock);
396
395 imxdi->irq = platform_get_irq(pdev, 0); 397 imxdi->irq = platform_get_irq(pdev, 0);
396 if (imxdi->irq < 0) 398 if (imxdi->irq < 0)
397 return imxdi->irq; 399 return imxdi->irq;
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index b4d572f65f07..fd00afd8b850 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -377,7 +377,11 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
377 /* Will be done on the slow path. */ 377 /* Will be done on the slow path. */
378 return -EAGAIN; 378 return -EAGAIN;
379 } 379 }
380 if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) { 380 if (stsch_err(schid, &schib)) {
381 /* Subchannel is not provided. */
382 return -ENXIO;
383 }
384 if (!css_sch_is_valid(&schib)) {
381 /* Unusable - ignore. */ 385 /* Unusable - ignore. */
382 return 0; 386 return 0;
383 } 387 }
@@ -536,6 +540,7 @@ static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
536 case -ENOMEM: 540 case -ENOMEM:
537 case -EIO: 541 case -EIO:
538 /* These should abort looping */ 542 /* These should abort looping */
543 idset_sch_del_subseq(slow_subchannel_set, schid);
539 break; 544 break;
540 default: 545 default:
541 rc = 0; 546 rc = 0;
diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c
index e6d5f8c49524..199bc6791177 100644
--- a/drivers/s390/cio/idset.c
+++ b/drivers/s390/cio/idset.c
@@ -1,9 +1,10 @@
1/* 1/*
2 * Copyright IBM Corp. 2007 2 * Copyright IBM Corp. 2007, 2012
3 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> 3 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
4 */ 4 */
5 5
6#include <linux/vmalloc.h> 6#include <linux/vmalloc.h>
7#include <linux/bitmap.h>
7#include <linux/bitops.h> 8#include <linux/bitops.h>
8#include "idset.h" 9#include "idset.h"
9#include "css.h" 10#include "css.h"
@@ -89,6 +90,14 @@ void idset_sch_del(struct idset *set, struct subchannel_id schid)
89 idset_del(set, schid.ssid, schid.sch_no); 90 idset_del(set, schid.ssid, schid.sch_no);
90} 91}
91 92
93/* Clear ids starting from @schid up to end of subchannel set. */
94void idset_sch_del_subseq(struct idset *set, struct subchannel_id schid)
95{
96 int pos = schid.ssid * set->num_id + schid.sch_no;
97
98 bitmap_clear(set->bitmap, pos, set->num_id - schid.sch_no);
99}
100
92int idset_sch_contains(struct idset *set, struct subchannel_id schid) 101int idset_sch_contains(struct idset *set, struct subchannel_id schid)
93{ 102{
94 return idset_contains(set, schid.ssid, schid.sch_no); 103 return idset_contains(set, schid.ssid, schid.sch_no);
@@ -111,20 +120,13 @@ int idset_sch_get_first(struct idset *set, struct subchannel_id *schid)
111 120
112int idset_is_empty(struct idset *set) 121int idset_is_empty(struct idset *set)
113{ 122{
114 int bitnum; 123 return bitmap_empty(set->bitmap, set->num_ssid * set->num_id);
115
116 bitnum = find_first_bit(set->bitmap, set->num_ssid * set->num_id);
117 if (bitnum >= set->num_ssid * set->num_id)
118 return 1;
119 return 0;
120} 124}
121 125
122void idset_add_set(struct idset *to, struct idset *from) 126void idset_add_set(struct idset *to, struct idset *from)
123{ 127{
124 unsigned long i, len; 128 int len = min(__BITOPS_WORDS(to->num_ssid * to->num_id),
129 __BITOPS_WORDS(from->num_ssid * from->num_id));
125 130
126 len = min(__BITOPS_WORDS(to->num_ssid * to->num_id), 131 bitmap_or(to->bitmap, to->bitmap, from->bitmap, len);
127 __BITOPS_WORDS(from->num_ssid * from->num_id));
128 for (i = 0; i < len ; i++)
129 to->bitmap[i] |= from->bitmap[i];
130} 132}
diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h
index 3d943f03591e..06d3bc01bb09 100644
--- a/drivers/s390/cio/idset.h
+++ b/drivers/s390/cio/idset.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright IBM Corp. 2007 2 * Copyright IBM Corp. 2007, 2012
3 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> 3 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
4 */ 4 */
5 5
@@ -17,6 +17,7 @@ void idset_fill(struct idset *set);
17struct idset *idset_sch_new(void); 17struct idset *idset_sch_new(void);
18void idset_sch_add(struct idset *set, struct subchannel_id id); 18void idset_sch_add(struct idset *set, struct subchannel_id id);
19void idset_sch_del(struct idset *set, struct subchannel_id id); 19void idset_sch_del(struct idset *set, struct subchannel_id id);
20void idset_sch_del_subseq(struct idset *set, struct subchannel_id schid);
20int idset_sch_contains(struct idset *set, struct subchannel_id id); 21int idset_sch_contains(struct idset *set, struct subchannel_id id);
21int idset_sch_get_first(struct idset *set, struct subchannel_id *id); 22int idset_sch_get_first(struct idset *set, struct subchannel_id *id);
22int idset_is_empty(struct idset *set); 23int idset_is_empty(struct idset *set);
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 919464102d33..a1db91a99b89 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -2186,8 +2186,6 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
2186 printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n", 2186 printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n",
2187 adev->res.start, pl022->virtbase); 2187 adev->res.start, pl022->virtbase);
2188 2188
2189 pm_runtime_resume(dev);
2190
2191 pl022->clk = devm_clk_get(&adev->dev, NULL); 2189 pl022->clk = devm_clk_get(&adev->dev, NULL);
2192 if (IS_ERR(pl022->clk)) { 2190 if (IS_ERR(pl022->clk)) {
2193 status = PTR_ERR(pl022->clk); 2191 status = PTR_ERR(pl022->clk);
@@ -2292,7 +2290,6 @@ pl022_remove(struct amba_device *adev)
2292 2290
2293 clk_disable(pl022->clk); 2291 clk_disable(pl022->clk);
2294 clk_unprepare(pl022->clk); 2292 clk_unprepare(pl022->clk);
2295 pm_runtime_disable(&adev->dev);
2296 amba_release_regions(adev); 2293 amba_release_regions(adev);
2297 tasklet_disable(&pl022->pump_transfers); 2294 tasklet_disable(&pl022->pump_transfers);
2298 spi_unregister_master(pl022->master); 2295 spi_unregister_master(pl022->master);
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 4894bde4bbff..30faf6d4ab91 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -147,8 +147,6 @@ struct rspi_data {
147 unsigned char spsr; 147 unsigned char spsr;
148 148
149 /* for dmaengine */ 149 /* for dmaengine */
150 struct sh_dmae_slave dma_tx;
151 struct sh_dmae_slave dma_rx;
152 struct dma_chan *chan_tx; 150 struct dma_chan *chan_tx;
153 struct dma_chan *chan_rx; 151 struct dma_chan *chan_rx;
154 int irq; 152 int irq;
@@ -663,20 +661,16 @@ static irqreturn_t rspi_irq(int irq, void *_sr)
663 return ret; 661 return ret;
664} 662}
665 663
666static bool rspi_filter(struct dma_chan *chan, void *filter_param) 664static int __devinit rspi_request_dma(struct rspi_data *rspi,
667{ 665 struct platform_device *pdev)
668 chan->private = filter_param;
669 return true;
670}
671
672static void __devinit rspi_request_dma(struct rspi_data *rspi,
673 struct platform_device *pdev)
674{ 666{
675 struct rspi_plat_data *rspi_pd = pdev->dev.platform_data; 667 struct rspi_plat_data *rspi_pd = pdev->dev.platform_data;
676 dma_cap_mask_t mask; 668 dma_cap_mask_t mask;
669 struct dma_slave_config cfg;
670 int ret;
677 671
678 if (!rspi_pd) 672 if (!rspi_pd)
679 return; 673 return 0; /* The driver assumes no error. */
680 674
681 rspi->dma_width_16bit = rspi_pd->dma_width_16bit; 675 rspi->dma_width_16bit = rspi_pd->dma_width_16bit;
682 676
@@ -684,21 +678,35 @@ static void __devinit rspi_request_dma(struct rspi_data *rspi,
684 if (rspi_pd->dma_rx_id && rspi_pd->dma_tx_id) { 678 if (rspi_pd->dma_rx_id && rspi_pd->dma_tx_id) {
685 dma_cap_zero(mask); 679 dma_cap_zero(mask);
686 dma_cap_set(DMA_SLAVE, mask); 680 dma_cap_set(DMA_SLAVE, mask);
687 rspi->dma_rx.slave_id = rspi_pd->dma_rx_id; 681 rspi->chan_rx = dma_request_channel(mask, shdma_chan_filter,
688 rspi->chan_rx = dma_request_channel(mask, rspi_filter, 682 (void *)rspi_pd->dma_rx_id);
689 &rspi->dma_rx); 683 if (rspi->chan_rx) {
690 if (rspi->chan_rx) 684 cfg.slave_id = rspi_pd->dma_rx_id;
691 dev_info(&pdev->dev, "Use DMA when rx.\n"); 685 cfg.direction = DMA_DEV_TO_MEM;
686 ret = dmaengine_slave_config(rspi->chan_rx, &cfg);
687 if (!ret)
688 dev_info(&pdev->dev, "Use DMA when rx.\n");
689 else
690 return ret;
691 }
692 } 692 }
693 if (rspi_pd->dma_tx_id) { 693 if (rspi_pd->dma_tx_id) {
694 dma_cap_zero(mask); 694 dma_cap_zero(mask);
695 dma_cap_set(DMA_SLAVE, mask); 695 dma_cap_set(DMA_SLAVE, mask);
696 rspi->dma_tx.slave_id = rspi_pd->dma_tx_id; 696 rspi->chan_tx = dma_request_channel(mask, shdma_chan_filter,
697 rspi->chan_tx = dma_request_channel(mask, rspi_filter, 697 (void *)rspi_pd->dma_tx_id);
698 &rspi->dma_tx); 698 if (rspi->chan_tx) {
699 if (rspi->chan_tx) 699 cfg.slave_id = rspi_pd->dma_tx_id;
700 dev_info(&pdev->dev, "Use DMA when tx\n"); 700 cfg.direction = DMA_MEM_TO_DEV;
701 ret = dmaengine_slave_config(rspi->chan_tx, &cfg);
702 if (!ret)
703 dev_info(&pdev->dev, "Use DMA when tx\n");
704 else
705 return ret;
706 }
701 } 707 }
708
709 return 0;
702} 710}
703 711
704static void __devexit rspi_release_dma(struct rspi_data *rspi) 712static void __devexit rspi_release_dma(struct rspi_data *rspi)
@@ -788,7 +796,11 @@ static int __devinit rspi_probe(struct platform_device *pdev)
788 } 796 }
789 797
790 rspi->irq = irq; 798 rspi->irq = irq;
791 rspi_request_dma(rspi, pdev); 799 ret = rspi_request_dma(rspi, pdev);
800 if (ret < 0) {
801 dev_err(&pdev->dev, "rspi_request_dma failed.\n");
802 goto error4;
803 }
792 804
793 ret = spi_register_master(master); 805 ret = spi_register_master(master);
794 if (ret < 0) { 806 if (ret < 0) {
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index c101697a4ba7..765a945f8ea1 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -60,7 +60,8 @@ config LCD_LTV350QV
60 The LTV350QV panel is present on all ATSTK1000 boards. 60 The LTV350QV panel is present on all ATSTK1000 boards.
61 61
62config LCD_ILI9320 62config LCD_ILI9320
63 tristate 63 tristate "ILI Technology ILI9320 controller support"
64 depends on SPI
64 help 65 help
65 If you have a panel based on the ILI9320 controller chip 66 If you have a panel based on the ILI9320 controller chip
66 then say y to include a power driver for it. 67 then say y to include a power driver for it.
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index d4dffcd52873..126d8ce591ce 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -3,6 +3,7 @@ menu "Xen driver support"
3 3
4config XEN_BALLOON 4config XEN_BALLOON
5 bool "Xen memory balloon driver" 5 bool "Xen memory balloon driver"
6 depends on !ARM
6 default y 7 default y
7 help 8 help
8 The balloon driver allows the Xen domain to request more memory from 9 The balloon driver allows the Xen domain to request more memory from
@@ -145,6 +146,7 @@ config SWIOTLB_XEN
145 146
146config XEN_TMEM 147config XEN_TMEM
147 bool 148 bool
149 depends on !ARM
148 default y if (CLEANCACHE || FRONTSWAP) 150 default y if (CLEANCACHE || FRONTSWAP)
149 help 151 help
150 Shim to interface in-kernel Transcendent Memory hooks 152 Shim to interface in-kernel Transcendent Memory hooks
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 31ab82fda38a..d6886d90ccfd 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -55,7 +55,6 @@
55#include <asm/pgalloc.h> 55#include <asm/pgalloc.h>
56#include <asm/pgtable.h> 56#include <asm/pgtable.h>
57#include <asm/tlb.h> 57#include <asm/tlb.h>
58#include <asm/e820.h>
59 58
60#include <asm/xen/hypervisor.h> 59#include <asm/xen/hypervisor.h>
61#include <asm/xen/hypercall.h> 60#include <asm/xen/hypercall.h>
@@ -88,7 +87,7 @@ struct balloon_stats balloon_stats;
88EXPORT_SYMBOL_GPL(balloon_stats); 87EXPORT_SYMBOL_GPL(balloon_stats);
89 88
90/* We increase/decrease in batches which fit in a page */ 89/* We increase/decrease in batches which fit in a page */
91static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)]; 90static xen_pfn_t frame_list[PAGE_SIZE / sizeof(unsigned long)];
92 91
93#ifdef CONFIG_HIGHMEM 92#ifdef CONFIG_HIGHMEM
94#define inc_totalhigh_pages() (totalhigh_pages++) 93#define inc_totalhigh_pages() (totalhigh_pages++)
diff --git a/drivers/xen/dbgp.c b/drivers/xen/dbgp.c
index 42569c77ccc8..f3ccc80a455f 100644
--- a/drivers/xen/dbgp.c
+++ b/drivers/xen/dbgp.c
@@ -8,7 +8,9 @@
8 8
9static int xen_dbgp_op(struct usb_hcd *hcd, int op) 9static int xen_dbgp_op(struct usb_hcd *hcd, int op)
10{ 10{
11#ifdef CONFIG_PCI
11 const struct device *ctrlr = hcd_to_bus(hcd)->controller; 12 const struct device *ctrlr = hcd_to_bus(hcd)->controller;
13#endif
12 struct physdev_dbgp_op dbgp; 14 struct physdev_dbgp_op dbgp;
13 15
14 if (!xen_initial_domain()) 16 if (!xen_initial_domain())
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 59e10a1286d5..912ac81b6dbf 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -115,7 +115,9 @@ struct irq_info {
115#define PIRQ_SHAREABLE (1 << 1) 115#define PIRQ_SHAREABLE (1 << 1)
116 116
117static int *evtchn_to_irq; 117static int *evtchn_to_irq;
118#ifdef CONFIG_X86
118static unsigned long *pirq_eoi_map; 119static unsigned long *pirq_eoi_map;
120#endif
119static bool (*pirq_needs_eoi)(unsigned irq); 121static bool (*pirq_needs_eoi)(unsigned irq);
120 122
121static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG], 123static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG],
@@ -277,10 +279,12 @@ static unsigned int cpu_from_evtchn(unsigned int evtchn)
277 return ret; 279 return ret;
278} 280}
279 281
282#ifdef CONFIG_X86
280static bool pirq_check_eoi_map(unsigned irq) 283static bool pirq_check_eoi_map(unsigned irq)
281{ 284{
282 return test_bit(pirq_from_irq(irq), pirq_eoi_map); 285 return test_bit(pirq_from_irq(irq), pirq_eoi_map);
283} 286}
287#endif
284 288
285static bool pirq_needs_eoi_flag(unsigned irq) 289static bool pirq_needs_eoi_flag(unsigned irq)
286{ 290{
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index b2b0a375b348..b91f14e83164 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -84,7 +84,7 @@ struct gnttab_ops {
84 * nr_gframes is the number of frames to map grant table. Returning 84 * nr_gframes is the number of frames to map grant table. Returning
85 * GNTST_okay means success and negative value means failure. 85 * GNTST_okay means success and negative value means failure.
86 */ 86 */
87 int (*map_frames)(unsigned long *frames, unsigned int nr_gframes); 87 int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
88 /* 88 /*
89 * Release a list of frames which are mapped in map_frames for grant 89 * Release a list of frames which are mapped in map_frames for grant
90 * entry status. 90 * entry status.
@@ -960,7 +960,7 @@ static unsigned nr_status_frames(unsigned nr_grant_frames)
960 return (nr_grant_frames * GREFS_PER_GRANT_FRAME + SPP - 1) / SPP; 960 return (nr_grant_frames * GREFS_PER_GRANT_FRAME + SPP - 1) / SPP;
961} 961}
962 962
963static int gnttab_map_frames_v1(unsigned long *frames, unsigned int nr_gframes) 963static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
964{ 964{
965 int rc; 965 int rc;
966 966
@@ -977,7 +977,7 @@ static void gnttab_unmap_frames_v1(void)
977 arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames); 977 arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
978} 978}
979 979
980static int gnttab_map_frames_v2(unsigned long *frames, unsigned int nr_gframes) 980static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
981{ 981{
982 uint64_t *sframes; 982 uint64_t *sframes;
983 unsigned int nr_sframes; 983 unsigned int nr_sframes;
@@ -1029,7 +1029,7 @@ static void gnttab_unmap_frames_v2(void)
1029static int gnttab_map(unsigned int start_idx, unsigned int end_idx) 1029static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
1030{ 1030{
1031 struct gnttab_setup_table setup; 1031 struct gnttab_setup_table setup;
1032 unsigned long *frames; 1032 xen_pfn_t *frames;
1033 unsigned int nr_gframes = end_idx + 1; 1033 unsigned int nr_gframes = end_idx + 1;
1034 int rc; 1034 int rc;
1035 1035
diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
index 5e5ad7e28858..96453f8a85c5 100644
--- a/drivers/xen/sys-hypervisor.c
+++ b/drivers/xen/sys-hypervisor.c
@@ -11,6 +11,7 @@
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/kobject.h> 13#include <linux/kobject.h>
14#include <linux/err.h>
14 15
15#include <asm/xen/hypervisor.h> 16#include <asm/xen/hypervisor.h>
16#include <asm/xen/hypercall.h> 17#include <asm/xen/hypercall.h>
@@ -284,7 +285,8 @@ static ssize_t virtual_start_show(struct hyp_sysfs_attr *attr, char *buffer)
284 ret = HYPERVISOR_xen_version(XENVER_platform_parameters, 285 ret = HYPERVISOR_xen_version(XENVER_platform_parameters,
285 parms); 286 parms);
286 if (!ret) 287 if (!ret)
287 ret = sprintf(buffer, "%lx\n", parms->virt_start); 288 ret = sprintf(buffer, "%"PRI_xen_ulong"\n",
289 parms->virt_start);
288 kfree(parms); 290 kfree(parms);
289 } 291 }
290 292
diff --git a/drivers/xen/xen-pciback/vpci.c b/drivers/xen/xen-pciback/vpci.c
index 46d140baebd8..0f478ac483cd 100644
--- a/drivers/xen/xen-pciback/vpci.c
+++ b/drivers/xen/xen-pciback/vpci.c
@@ -89,9 +89,15 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
89 89
90 mutex_lock(&vpci_dev->lock); 90 mutex_lock(&vpci_dev->lock);
91 91
92 /* Keep multi-function devices together on the virtual PCI bus */ 92 /*
93 for (slot = 0; slot < PCI_SLOT_MAX; slot++) { 93 * Keep multi-function devices together on the virtual PCI bus, except
94 if (!list_empty(&vpci_dev->dev_list[slot])) { 94 * virtual functions.
95 */
96 if (!dev->is_virtfn) {
97 for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
98 if (list_empty(&vpci_dev->dev_list[slot]))
99 continue;
100
95 t = list_entry(list_first(&vpci_dev->dev_list[slot]), 101 t = list_entry(list_first(&vpci_dev->dev_list[slot]),
96 struct pci_dev_entry, list); 102 struct pci_dev_entry, list);
97 103
@@ -116,7 +122,7 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
116 pci_name(dev), slot); 122 pci_name(dev), slot);
117 list_add_tail(&dev_entry->list, 123 list_add_tail(&dev_entry->list,
118 &vpci_dev->dev_list[slot]); 124 &vpci_dev->dev_list[slot]);
119 func = PCI_FUNC(dev->devfn); 125 func = dev->is_virtfn ? 0 : PCI_FUNC(dev->devfn);
120 goto unlock; 126 goto unlock;
121 } 127 }
122 } 128 }
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index f5dda83ad7a5..acedeabe589c 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -627,6 +627,7 @@ static struct xenbus_watch *find_watch(const char *token)
627 */ 627 */
628static bool xen_strict_xenbus_quirk(void) 628static bool xen_strict_xenbus_quirk(void)
629{ 629{
630#ifdef CONFIG_X86
630 uint32_t eax, ebx, ecx, edx, base; 631 uint32_t eax, ebx, ecx, edx, base;
631 632
632 base = xen_cpuid_base(); 633 base = xen_cpuid_base();
@@ -634,6 +635,7 @@ static bool xen_strict_xenbus_quirk(void)
634 635
635 if ((eax >> 16) < 4) 636 if ((eax >> 16) < 4)
636 return true; 637 return true;
638#endif
637 return false; 639 return false;
638 640
639} 641}
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index f3187938e081..208d8aa5b07e 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -283,9 +283,7 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
283 goto out; 283 goto out;
284 } 284 }
285 285
286 rcu_read_lock(); 286 root_level = btrfs_old_root_level(root, time_seq);
287 root_level = btrfs_header_level(root->node);
288 rcu_read_unlock();
289 287
290 if (root_level + 1 == level) 288 if (root_level + 1 == level)
291 goto out; 289 goto out;
@@ -1177,16 +1175,15 @@ int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
1177 return ret; 1175 return ret;
1178} 1176}
1179 1177
1180static char *ref_to_path(struct btrfs_root *fs_root, 1178char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
1181 struct btrfs_path *path, 1179 u32 name_len, unsigned long name_off,
1182 u32 name_len, unsigned long name_off, 1180 struct extent_buffer *eb_in, u64 parent,
1183 struct extent_buffer *eb_in, u64 parent, 1181 char *dest, u32 size)
1184 char *dest, u32 size)
1185{ 1182{
1186 int slot; 1183 int slot;
1187 u64 next_inum; 1184 u64 next_inum;
1188 int ret; 1185 int ret;
1189 s64 bytes_left = size - 1; 1186 s64 bytes_left = ((s64)size) - 1;
1190 struct extent_buffer *eb = eb_in; 1187 struct extent_buffer *eb = eb_in;
1191 struct btrfs_key found_key; 1188 struct btrfs_key found_key;
1192 int leave_spinning = path->leave_spinning; 1189 int leave_spinning = path->leave_spinning;
@@ -1266,10 +1263,10 @@ char *btrfs_iref_to_path(struct btrfs_root *fs_root,
1266 struct extent_buffer *eb_in, u64 parent, 1263 struct extent_buffer *eb_in, u64 parent,
1267 char *dest, u32 size) 1264 char *dest, u32 size)
1268{ 1265{
1269 return ref_to_path(fs_root, path, 1266 return btrfs_ref_to_path(fs_root, path,
1270 btrfs_inode_ref_name_len(eb_in, iref), 1267 btrfs_inode_ref_name_len(eb_in, iref),
1271 (unsigned long)(iref + 1), 1268 (unsigned long)(iref + 1),
1272 eb_in, parent, dest, size); 1269 eb_in, parent, dest, size);
1273} 1270}
1274 1271
1275/* 1272/*
@@ -1715,9 +1712,8 @@ static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
1715 ipath->fspath->bytes_left - s_ptr : 0; 1712 ipath->fspath->bytes_left - s_ptr : 0;
1716 1713
1717 fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr; 1714 fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
1718 fspath = ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len, 1715 fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
1719 name_off, eb, inum, fspath_min, 1716 name_off, eb, inum, fspath_min, bytes_left);
1720 bytes_left);
1721 if (IS_ERR(fspath)) 1717 if (IS_ERR(fspath))
1722 return PTR_ERR(fspath); 1718 return PTR_ERR(fspath);
1723 1719
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
index e75533043a5f..d61feca79455 100644
--- a/fs/btrfs/backref.h
+++ b/fs/btrfs/backref.h
@@ -62,6 +62,10 @@ int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
62char *btrfs_iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path, 62char *btrfs_iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
63 struct btrfs_inode_ref *iref, struct extent_buffer *eb, 63 struct btrfs_inode_ref *iref, struct extent_buffer *eb,
64 u64 parent, char *dest, u32 size); 64 u64 parent, char *dest, u32 size);
65char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
66 u32 name_len, unsigned long name_off,
67 struct extent_buffer *eb_in, u64 parent,
68 char *dest, u32 size);
65 69
66struct btrfs_data_container *init_data_container(u32 total_bytes); 70struct btrfs_data_container *init_data_container(u32 total_bytes);
67struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root, 71struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index b33436211000..cdfb4c49a806 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -596,6 +596,11 @@ tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
596 if (tree_mod_dont_log(fs_info, eb)) 596 if (tree_mod_dont_log(fs_info, eb))
597 return 0; 597 return 0;
598 598
599 /*
600 * When we override something during the move, we log these removals.
601 * This can only happen when we move towards the beginning of the
602 * buffer, i.e. dst_slot < src_slot.
603 */
599 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) { 604 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
600 ret = tree_mod_log_insert_key_locked(fs_info, eb, i + dst_slot, 605 ret = tree_mod_log_insert_key_locked(fs_info, eb, i + dst_slot,
601 MOD_LOG_KEY_REMOVE_WHILE_MOVING); 606 MOD_LOG_KEY_REMOVE_WHILE_MOVING);
@@ -647,8 +652,6 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
647 if (tree_mod_dont_log(fs_info, NULL)) 652 if (tree_mod_dont_log(fs_info, NULL))
648 return 0; 653 return 0;
649 654
650 __tree_mod_log_free_eb(fs_info, old_root);
651
652 ret = tree_mod_alloc(fs_info, flags, &tm); 655 ret = tree_mod_alloc(fs_info, flags, &tm);
653 if (ret < 0) 656 if (ret < 0)
654 goto out; 657 goto out;
@@ -926,12 +929,7 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
926 ret = btrfs_dec_ref(trans, root, buf, 1, 1); 929 ret = btrfs_dec_ref(trans, root, buf, 1, 1);
927 BUG_ON(ret); /* -ENOMEM */ 930 BUG_ON(ret); /* -ENOMEM */
928 } 931 }
929 /* 932 tree_mod_log_free_eb(root->fs_info, buf);
930 * don't log freeing in case we're freeing the root node, this
931 * is done by tree_mod_log_set_root_pointer later
932 */
933 if (buf != root->node && btrfs_header_level(buf) != 0)
934 tree_mod_log_free_eb(root->fs_info, buf);
935 clean_tree_block(trans, root, buf); 933 clean_tree_block(trans, root, buf);
936 *last_ref = 1; 934 *last_ref = 1;
937 } 935 }
@@ -1225,6 +1223,8 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
1225 free_extent_buffer(eb); 1223 free_extent_buffer(eb);
1226 1224
1227 __tree_mod_log_rewind(eb_rewin, time_seq, tm); 1225 __tree_mod_log_rewind(eb_rewin, time_seq, tm);
1226 WARN_ON(btrfs_header_nritems(eb_rewin) >
1227 BTRFS_NODEPTRS_PER_BLOCK(fs_info->fs_root));
1228 1228
1229 return eb_rewin; 1229 return eb_rewin;
1230} 1230}
@@ -1241,9 +1241,11 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
1241{ 1241{
1242 struct tree_mod_elem *tm; 1242 struct tree_mod_elem *tm;
1243 struct extent_buffer *eb; 1243 struct extent_buffer *eb;
1244 struct extent_buffer *old;
1244 struct tree_mod_root *old_root = NULL; 1245 struct tree_mod_root *old_root = NULL;
1245 u64 old_generation = 0; 1246 u64 old_generation = 0;
1246 u64 logical; 1247 u64 logical;
1248 u32 blocksize;
1247 1249
1248 eb = btrfs_read_lock_root_node(root); 1250 eb = btrfs_read_lock_root_node(root);
1249 tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq); 1251 tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq);
@@ -1259,14 +1261,32 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
1259 } 1261 }
1260 1262
1261 tm = tree_mod_log_search(root->fs_info, logical, time_seq); 1263 tm = tree_mod_log_search(root->fs_info, logical, time_seq);
1262 if (old_root) 1264 if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1265 btrfs_tree_read_unlock(root->node);
1266 free_extent_buffer(root->node);
1267 blocksize = btrfs_level_size(root, old_root->level);
1268 old = read_tree_block(root, logical, blocksize, 0);
1269 if (!old) {
1270 pr_warn("btrfs: failed to read tree block %llu from get_old_root\n",
1271 logical);
1272 WARN_ON(1);
1273 } else {
1274 eb = btrfs_clone_extent_buffer(old);
1275 free_extent_buffer(old);
1276 }
1277 } else if (old_root) {
1278 btrfs_tree_read_unlock(root->node);
1279 free_extent_buffer(root->node);
1263 eb = alloc_dummy_extent_buffer(logical, root->nodesize); 1280 eb = alloc_dummy_extent_buffer(logical, root->nodesize);
1264 else 1281 } else {
1265 eb = btrfs_clone_extent_buffer(root->node); 1282 eb = btrfs_clone_extent_buffer(root->node);
1266 btrfs_tree_read_unlock(root->node); 1283 btrfs_tree_read_unlock(root->node);
1267 free_extent_buffer(root->node); 1284 free_extent_buffer(root->node);
1285 }
1286
1268 if (!eb) 1287 if (!eb)
1269 return NULL; 1288 return NULL;
1289 extent_buffer_get(eb);
1270 btrfs_tree_read_lock(eb); 1290 btrfs_tree_read_lock(eb);
1271 if (old_root) { 1291 if (old_root) {
1272 btrfs_set_header_bytenr(eb, eb->start); 1292 btrfs_set_header_bytenr(eb, eb->start);
@@ -1279,11 +1299,28 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
1279 __tree_mod_log_rewind(eb, time_seq, tm); 1299 __tree_mod_log_rewind(eb, time_seq, tm);
1280 else 1300 else
1281 WARN_ON(btrfs_header_level(eb) != 0); 1301 WARN_ON(btrfs_header_level(eb) != 0);
1282 extent_buffer_get(eb); 1302 WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));
1283 1303
1284 return eb; 1304 return eb;
1285} 1305}
1286 1306
1307int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
1308{
1309 struct tree_mod_elem *tm;
1310 int level;
1311
1312 tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq);
1313 if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
1314 level = tm->old_root.level;
1315 } else {
1316 rcu_read_lock();
1317 level = btrfs_header_level(root->node);
1318 rcu_read_unlock();
1319 }
1320
1321 return level;
1322}
1323
1287static inline int should_cow_block(struct btrfs_trans_handle *trans, 1324static inline int should_cow_block(struct btrfs_trans_handle *trans,
1288 struct btrfs_root *root, 1325 struct btrfs_root *root,
1289 struct extent_buffer *buf) 1326 struct extent_buffer *buf)
@@ -1725,6 +1762,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
1725 goto enospc; 1762 goto enospc;
1726 } 1763 }
1727 1764
1765 tree_mod_log_free_eb(root->fs_info, root->node);
1728 tree_mod_log_set_root_pointer(root, child); 1766 tree_mod_log_set_root_pointer(root, child);
1729 rcu_assign_pointer(root->node, child); 1767 rcu_assign_pointer(root->node, child);
1730 1768
@@ -2970,8 +3008,10 @@ static int push_node_left(struct btrfs_trans_handle *trans,
2970 push_items * sizeof(struct btrfs_key_ptr)); 3008 push_items * sizeof(struct btrfs_key_ptr));
2971 3009
2972 if (push_items < src_nritems) { 3010 if (push_items < src_nritems) {
2973 tree_mod_log_eb_move(root->fs_info, src, 0, push_items, 3011 /*
2974 src_nritems - push_items); 3012 * don't call tree_mod_log_eb_move here, key removal was already
3013 * fully logged by tree_mod_log_eb_copy above.
3014 */
2975 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0), 3015 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
2976 btrfs_node_key_ptr_offset(push_items), 3016 btrfs_node_key_ptr_offset(push_items),
2977 (src_nritems - push_items) * 3017 (src_nritems - push_items) *
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 926c9ffc66d9..c72ead869507 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -3120,6 +3120,7 @@ static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
3120{ 3120{
3121 return atomic_inc_return(&fs_info->tree_mod_seq); 3121 return atomic_inc_return(&fs_info->tree_mod_seq);
3122} 3122}
3123int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq);
3123 3124
3124/* root-item.c */ 3125/* root-item.c */
3125int btrfs_find_root_ref(struct btrfs_root *tree_root, 3126int btrfs_find_root_ref(struct btrfs_root *tree_root,
@@ -3338,6 +3339,8 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
3338int btrfs_update_inode(struct btrfs_trans_handle *trans, 3339int btrfs_update_inode(struct btrfs_trans_handle *trans,
3339 struct btrfs_root *root, 3340 struct btrfs_root *root,
3340 struct inode *inode); 3341 struct inode *inode);
3342int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
3343 struct btrfs_root *root, struct inode *inode);
3341int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode); 3344int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode);
3342int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode); 3345int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode);
3343int btrfs_orphan_cleanup(struct btrfs_root *root); 3346int btrfs_orphan_cleanup(struct btrfs_root *root);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 8036d3a84853..472873a94d96 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4110,8 +4110,8 @@ struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len)
4110 4110
4111 return eb; 4111 return eb;
4112err: 4112err:
4113 for (i--; i >= 0; i--) 4113 for (; i > 0; i--)
4114 __free_page(eb->pages[i]); 4114 __free_page(eb->pages[i - 1]);
4115 __free_extent_buffer(eb); 4115 __free_extent_buffer(eb);
4116 return NULL; 4116 return NULL;
4117} 4117}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 85a1e5053fe6..95542a1b3dfc 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -94,8 +94,6 @@ static noinline int cow_file_range(struct inode *inode,
94 struct page *locked_page, 94 struct page *locked_page,
95 u64 start, u64 end, int *page_started, 95 u64 start, u64 end, int *page_started,
96 unsigned long *nr_written, int unlock); 96 unsigned long *nr_written, int unlock);
97static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
98 struct btrfs_root *root, struct inode *inode);
99 97
100static int btrfs_init_inode_security(struct btrfs_trans_handle *trans, 98static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
101 struct inode *inode, struct inode *dir, 99 struct inode *inode, struct inode *dir,
@@ -2746,8 +2744,9 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2746 return btrfs_update_inode_item(trans, root, inode); 2744 return btrfs_update_inode_item(trans, root, inode);
2747} 2745}
2748 2746
2749static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans, 2747noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
2750 struct btrfs_root *root, struct inode *inode) 2748 struct btrfs_root *root,
2749 struct inode *inode)
2751{ 2750{
2752 int ret; 2751 int ret;
2753 2752
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 61168805f175..8fcf9a59c28d 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -343,7 +343,8 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
343 return -EOPNOTSUPP; 343 return -EOPNOTSUPP;
344 if (copy_from_user(&range, arg, sizeof(range))) 344 if (copy_from_user(&range, arg, sizeof(range)))
345 return -EFAULT; 345 return -EFAULT;
346 if (range.start > total_bytes) 346 if (range.start > total_bytes ||
347 range.len < fs_info->sb->s_blocksize)
347 return -EINVAL; 348 return -EINVAL;
348 349
349 range.len = min(range.len, total_bytes - range.start); 350 range.len = min(range.len, total_bytes - range.start);
@@ -570,7 +571,8 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
570 ret = btrfs_commit_transaction(trans, 571 ret = btrfs_commit_transaction(trans,
571 root->fs_info->extent_root); 572 root->fs_info->extent_root);
572 } 573 }
573 BUG_ON(ret); 574 if (ret)
575 goto fail;
574 576
575 ret = pending_snapshot->error; 577 ret = pending_snapshot->error;
576 if (ret) 578 if (ret)
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 5039686df6ae..fe9d02c45f8e 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -790,8 +790,10 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
790 } 790 }
791 791
792 path = btrfs_alloc_path(); 792 path = btrfs_alloc_path();
793 if (!path) 793 if (!path) {
794 return -ENOMEM; 794 ret = -ENOMEM;
795 goto out_free_root;
796 }
795 797
796 key.objectid = 0; 798 key.objectid = 0;
797 key.type = BTRFS_QGROUP_STATUS_KEY; 799 key.type = BTRFS_QGROUP_STATUS_KEY;
@@ -800,7 +802,7 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
800 ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 802 ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
801 sizeof(*ptr)); 803 sizeof(*ptr));
802 if (ret) 804 if (ret)
803 goto out; 805 goto out_free_path;
804 806
805 leaf = path->nodes[0]; 807 leaf = path->nodes[0];
806 ptr = btrfs_item_ptr(leaf, path->slots[0], 808 ptr = btrfs_item_ptr(leaf, path->slots[0],
@@ -818,8 +820,15 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
818 fs_info->quota_root = quota_root; 820 fs_info->quota_root = quota_root;
819 fs_info->pending_quota_state = 1; 821 fs_info->pending_quota_state = 1;
820 spin_unlock(&fs_info->qgroup_lock); 822 spin_unlock(&fs_info->qgroup_lock);
821out: 823out_free_path:
822 btrfs_free_path(path); 824 btrfs_free_path(path);
825out_free_root:
826 if (ret) {
827 free_extent_buffer(quota_root->node);
828 free_extent_buffer(quota_root->commit_root);
829 kfree(quota_root);
830 }
831out:
823 return ret; 832 return ret;
824} 833}
825 834
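The qgroup hunk above replaces a single out: label with a small unwind ladder (out_free_path, out_free_root, out) so that a failed path allocation or item insert no longer leaks the freshly created quota root. Below is a self-contained sketch of that cascading-cleanup pattern in plain C; setup(), the malloc()ed buffers, and the fail_later switch are hypothetical stand-ins, not the btrfs code.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Each label releases exactly the resources acquired before the jump
 * to it; the "root" object survives only when the whole setup worked.
 */
static int setup(char **rootp, int fail_later)
{
	char *root, *path = NULL;
	int ret = 0;

	root = malloc(32);
	if (!root) {
		ret = -ENOMEM;
		goto out;			/* nothing to undo yet */
	}

	path = malloc(32);
	if (!path) {
		ret = -ENOMEM;
		goto out_free_root;
	}

	if (fail_later) {
		ret = -EIO;			/* simulate a later failure */
		goto out_free_path;
	}
	*rootp = root;				/* hand the root to the caller */

out_free_path:
	free(path);				/* scratch path is always released */
out_free_root:
	if (ret)				/* keep the root only on success */
		free(root);
out:
	return ret;
}

int main(void)
{
	char *root = NULL;

	printf("failure path: %d\n", setup(&root, 1));
	printf("success path: %d\n", setup(&root, 0));
	free(root);
	return 0;
}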
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index c7beb543a4a8..e78b297b0b00 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -745,31 +745,36 @@ typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
745 void *ctx); 745 void *ctx);
746 746
747/* 747/*
748 * Helper function to iterate the entries in ONE btrfs_inode_ref. 748 * Helper function to iterate the entries in ONE btrfs_inode_ref or
749 * btrfs_inode_extref.
749 * The iterate callback may return a non zero value to stop iteration. This can 750 * The iterate callback may return a non zero value to stop iteration. This can
750 * be a negative value for error codes or 1 to simply stop it. 751 * be a negative value for error codes or 1 to simply stop it.
751 * 752 *
752 * path must point to the INODE_REF when called. 753 * path must point to the INODE_REF or INODE_EXTREF when called.
753 */ 754 */
754static int iterate_inode_ref(struct send_ctx *sctx, 755static int iterate_inode_ref(struct send_ctx *sctx,
755 struct btrfs_root *root, struct btrfs_path *path, 756 struct btrfs_root *root, struct btrfs_path *path,
756 struct btrfs_key *found_key, int resolve, 757 struct btrfs_key *found_key, int resolve,
757 iterate_inode_ref_t iterate, void *ctx) 758 iterate_inode_ref_t iterate, void *ctx)
758{ 759{
759 struct extent_buffer *eb; 760 struct extent_buffer *eb = path->nodes[0];
760 struct btrfs_item *item; 761 struct btrfs_item *item;
761 struct btrfs_inode_ref *iref; 762 struct btrfs_inode_ref *iref;
763 struct btrfs_inode_extref *extref;
762 struct btrfs_path *tmp_path; 764 struct btrfs_path *tmp_path;
763 struct fs_path *p; 765 struct fs_path *p;
764 u32 cur; 766 u32 cur = 0;
765 u32 len;
766 u32 total; 767 u32 total;
767 int slot; 768 int slot = path->slots[0];
768 u32 name_len; 769 u32 name_len;
769 char *start; 770 char *start;
770 int ret = 0; 771 int ret = 0;
771 int num; 772 int num = 0;
772 int index; 773 int index;
774 u64 dir;
775 unsigned long name_off;
776 unsigned long elem_size;
777 unsigned long ptr;
773 778
774 p = fs_path_alloc_reversed(sctx); 779 p = fs_path_alloc_reversed(sctx);
775 if (!p) 780 if (!p)
@@ -781,24 +786,40 @@ static int iterate_inode_ref(struct send_ctx *sctx,
781 return -ENOMEM; 786 return -ENOMEM;
782 } 787 }
783 788
784 eb = path->nodes[0];
785 slot = path->slots[0];
786 item = btrfs_item_nr(eb, slot);
787 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
788 cur = 0;
789 len = 0;
790 total = btrfs_item_size(eb, item);
791 789
792 num = 0; 790 if (found_key->type == BTRFS_INODE_REF_KEY) {
791 ptr = (unsigned long)btrfs_item_ptr(eb, slot,
792 struct btrfs_inode_ref);
793 item = btrfs_item_nr(eb, slot);
794 total = btrfs_item_size(eb, item);
795 elem_size = sizeof(*iref);
796 } else {
797 ptr = btrfs_item_ptr_offset(eb, slot);
798 total = btrfs_item_size_nr(eb, slot);
799 elem_size = sizeof(*extref);
800 }
801
793 while (cur < total) { 802 while (cur < total) {
794 fs_path_reset(p); 803 fs_path_reset(p);
795 804
796 name_len = btrfs_inode_ref_name_len(eb, iref); 805 if (found_key->type == BTRFS_INODE_REF_KEY) {
797 index = btrfs_inode_ref_index(eb, iref); 806 iref = (struct btrfs_inode_ref *)(ptr + cur);
807 name_len = btrfs_inode_ref_name_len(eb, iref);
808 name_off = (unsigned long)(iref + 1);
809 index = btrfs_inode_ref_index(eb, iref);
810 dir = found_key->offset;
811 } else {
812 extref = (struct btrfs_inode_extref *)(ptr + cur);
813 name_len = btrfs_inode_extref_name_len(eb, extref);
814 name_off = (unsigned long)&extref->name;
815 index = btrfs_inode_extref_index(eb, extref);
816 dir = btrfs_inode_extref_parent(eb, extref);
817 }
818
798 if (resolve) { 819 if (resolve) {
799 start = btrfs_iref_to_path(root, tmp_path, iref, eb, 820 start = btrfs_ref_to_path(root, tmp_path, name_len,
800 found_key->offset, p->buf, 821 name_off, eb, dir,
801 p->buf_len); 822 p->buf, p->buf_len);
802 if (IS_ERR(start)) { 823 if (IS_ERR(start)) {
803 ret = PTR_ERR(start); 824 ret = PTR_ERR(start);
804 goto out; 825 goto out;
@@ -809,9 +830,10 @@ static int iterate_inode_ref(struct send_ctx *sctx,
809 p->buf_len + p->buf - start); 830 p->buf_len + p->buf - start);
810 if (ret < 0) 831 if (ret < 0)
811 goto out; 832 goto out;
812 start = btrfs_iref_to_path(root, tmp_path, iref, 833 start = btrfs_ref_to_path(root, tmp_path,
813 eb, found_key->offset, p->buf, 834 name_len, name_off,
814 p->buf_len); 835 eb, dir,
836 p->buf, p->buf_len);
815 if (IS_ERR(start)) { 837 if (IS_ERR(start)) {
816 ret = PTR_ERR(start); 838 ret = PTR_ERR(start);
817 goto out; 839 goto out;
@@ -820,21 +842,16 @@ static int iterate_inode_ref(struct send_ctx *sctx,
820 } 842 }
821 p->start = start; 843 p->start = start;
822 } else { 844 } else {
823 ret = fs_path_add_from_extent_buffer(p, eb, 845 ret = fs_path_add_from_extent_buffer(p, eb, name_off,
824 (unsigned long)(iref + 1), name_len); 846 name_len);
825 if (ret < 0) 847 if (ret < 0)
826 goto out; 848 goto out;
827 } 849 }
828 850
829 851 cur += elem_size + name_len;
830 len = sizeof(*iref) + name_len; 852 ret = iterate(num, dir, index, p, ctx);
831 iref = (struct btrfs_inode_ref *)((char *)iref + len);
832 cur += len;
833
834 ret = iterate(num, found_key->offset, index, p, ctx);
835 if (ret) 853 if (ret)
836 goto out; 854 goto out;
837
838 num++; 855 num++;
839 } 856 }
840 857
@@ -998,7 +1015,8 @@ static int get_inode_path(struct send_ctx *sctx, struct btrfs_root *root,
998 } 1015 }
999 btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]); 1016 btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
1000 if (found_key.objectid != ino || 1017 if (found_key.objectid != ino ||
1001 found_key.type != BTRFS_INODE_REF_KEY) { 1018 (found_key.type != BTRFS_INODE_REF_KEY &&
1019 found_key.type != BTRFS_INODE_EXTREF_KEY)) {
1002 ret = -ENOENT; 1020 ret = -ENOENT;
1003 goto out; 1021 goto out;
1004 } 1022 }
@@ -1551,8 +1569,8 @@ static int get_first_ref(struct send_ctx *sctx,
1551 struct btrfs_key key; 1569 struct btrfs_key key;
1552 struct btrfs_key found_key; 1570 struct btrfs_key found_key;
1553 struct btrfs_path *path; 1571 struct btrfs_path *path;
1554 struct btrfs_inode_ref *iref;
1555 int len; 1572 int len;
1573 u64 parent_dir;
1556 1574
1557 path = alloc_path_for_send(); 1575 path = alloc_path_for_send();
1558 if (!path) 1576 if (!path)
@@ -1568,27 +1586,41 @@ static int get_first_ref(struct send_ctx *sctx,
1568 if (!ret) 1586 if (!ret)
1569 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 1587 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1570 path->slots[0]); 1588 path->slots[0]);
1571 if (ret || found_key.objectid != key.objectid || 1589 if (ret || found_key.objectid != ino ||
1572 found_key.type != key.type) { 1590 (found_key.type != BTRFS_INODE_REF_KEY &&
1591 found_key.type != BTRFS_INODE_EXTREF_KEY)) {
1573 ret = -ENOENT; 1592 ret = -ENOENT;
1574 goto out; 1593 goto out;
1575 } 1594 }
1576 1595
1577 iref = btrfs_item_ptr(path->nodes[0], path->slots[0], 1596 if (key.type == BTRFS_INODE_REF_KEY) {
1578 struct btrfs_inode_ref); 1597 struct btrfs_inode_ref *iref;
1579 len = btrfs_inode_ref_name_len(path->nodes[0], iref); 1598 iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1580 ret = fs_path_add_from_extent_buffer(name, path->nodes[0], 1599 struct btrfs_inode_ref);
1581 (unsigned long)(iref + 1), len); 1600 len = btrfs_inode_ref_name_len(path->nodes[0], iref);
1601 ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
1602 (unsigned long)(iref + 1),
1603 len);
1604 parent_dir = found_key.offset;
1605 } else {
1606 struct btrfs_inode_extref *extref;
1607 extref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1608 struct btrfs_inode_extref);
1609 len = btrfs_inode_extref_name_len(path->nodes[0], extref);
1610 ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
1611 (unsigned long)&extref->name, len);
1612 parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
1613 }
1582 if (ret < 0) 1614 if (ret < 0)
1583 goto out; 1615 goto out;
1584 btrfs_release_path(path); 1616 btrfs_release_path(path);
1585 1617
1586 ret = get_inode_info(root, found_key.offset, NULL, dir_gen, NULL, NULL, 1618 ret = get_inode_info(root, parent_dir, NULL, dir_gen, NULL, NULL,
1587 NULL, NULL); 1619 NULL, NULL);
1588 if (ret < 0) 1620 if (ret < 0)
1589 goto out; 1621 goto out;
1590 1622
1591 *dir = found_key.offset; 1623 *dir = parent_dir;
1592 1624
1593out: 1625out:
1594 btrfs_free_path(path); 1626 btrfs_free_path(path);
@@ -2430,7 +2462,8 @@ verbose_printk("btrfs: send_create_inode %llu\n", ino);
2430 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p); 2462 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);
2431 } else if (S_ISCHR(mode) || S_ISBLK(mode) || 2463 } else if (S_ISCHR(mode) || S_ISBLK(mode) ||
2432 S_ISFIFO(mode) || S_ISSOCK(mode)) { 2464 S_ISFIFO(mode) || S_ISSOCK(mode)) {
2433 TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, rdev); 2465 TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev));
2466 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode);
2434 } 2467 }
2435 2468
2436 ret = send_cmd(sctx); 2469 ret = send_cmd(sctx);
@@ -3226,7 +3259,8 @@ static int process_all_refs(struct send_ctx *sctx,
3226 btrfs_item_key_to_cpu(eb, &found_key, slot); 3259 btrfs_item_key_to_cpu(eb, &found_key, slot);
3227 3260
3228 if (found_key.objectid != key.objectid || 3261 if (found_key.objectid != key.objectid ||
3229 found_key.type != key.type) 3262 (found_key.type != BTRFS_INODE_REF_KEY &&
3263 found_key.type != BTRFS_INODE_EXTREF_KEY))
3230 break; 3264 break;
3231 3265
3232 ret = iterate_inode_ref(sctx, root, path, &found_key, 0, cb, 3266 ret = iterate_inode_ref(sctx, root, path, &found_key, 0, cb,
@@ -3987,7 +4021,7 @@ static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end)
3987 if (sctx->cur_ino == 0) 4021 if (sctx->cur_ino == 0)
3988 goto out; 4022 goto out;
3989 if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid && 4023 if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
3990 sctx->cmp_key->type <= BTRFS_INODE_REF_KEY) 4024 sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY)
3991 goto out; 4025 goto out;
3992 if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs)) 4026 if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
3993 goto out; 4027 goto out;
@@ -4033,22 +4067,21 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
4033 if (ret < 0) 4067 if (ret < 0)
4034 goto out; 4068 goto out;
4035 4069
4036 if (!S_ISLNK(sctx->cur_inode_mode)) { 4070 if (!sctx->parent_root || sctx->cur_inode_new) {
4037 if (!sctx->parent_root || sctx->cur_inode_new) { 4071 need_chown = 1;
4072 if (!S_ISLNK(sctx->cur_inode_mode))
4038 need_chmod = 1; 4073 need_chmod = 1;
4039 need_chown = 1; 4074 } else {
4040 } else { 4075 ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
4041 ret = get_inode_info(sctx->parent_root, sctx->cur_ino, 4076 NULL, NULL, &right_mode, &right_uid,
4042 NULL, NULL, &right_mode, &right_uid, 4077 &right_gid, NULL);
4043 &right_gid, NULL); 4078 if (ret < 0)
4044 if (ret < 0) 4079 goto out;
4045 goto out;
4046 4080
4047 if (left_uid != right_uid || left_gid != right_gid) 4081 if (left_uid != right_uid || left_gid != right_gid)
4048 need_chown = 1; 4082 need_chown = 1;
4049 if (left_mode != right_mode) 4083 if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
4050 need_chmod = 1; 4084 need_chmod = 1;
4051 }
4052 } 4085 }
4053 4086
4054 if (S_ISREG(sctx->cur_inode_mode)) { 4087 if (S_ISREG(sctx->cur_inode_mode)) {
@@ -4335,7 +4368,8 @@ static int changed_cb(struct btrfs_root *left_root,
4335 4368
4336 if (key->type == BTRFS_INODE_ITEM_KEY) 4369 if (key->type == BTRFS_INODE_ITEM_KEY)
4337 ret = changed_inode(sctx, result); 4370 ret = changed_inode(sctx, result);
4338 else if (key->type == BTRFS_INODE_REF_KEY) 4371 else if (key->type == BTRFS_INODE_REF_KEY ||
4372 key->type == BTRFS_INODE_EXTREF_KEY)
4339 ret = changed_ref(sctx, result); 4373 ret = changed_ref(sctx, result);
4340 else if (key->type == BTRFS_XATTR_ITEM_KEY) 4374 else if (key->type == BTRFS_XATTR_ITEM_KEY)
4341 ret = changed_xattr(sctx, result); 4375 ret = changed_xattr(sctx, result);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 77db875b5116..04bbfb1052eb 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -1200,7 +1200,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
1200 btrfs_i_size_write(parent_inode, parent_inode->i_size + 1200 btrfs_i_size_write(parent_inode, parent_inode->i_size +
1201 dentry->d_name.len * 2); 1201 dentry->d_name.len * 2);
1202 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME; 1202 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
1203 ret = btrfs_update_inode(trans, parent_root, parent_inode); 1203 ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
1204 if (ret) 1204 if (ret)
1205 btrfs_abort_transaction(trans, root, ret); 1205 btrfs_abort_transaction(trans, root, ret);
1206fail: 1206fail:
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 029b903a4ae3..0f5ebb72a5ea 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1819,6 +1819,13 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1819 "Failed to relocate sys chunks after " 1819 "Failed to relocate sys chunks after "
1820 "device initialization. This can be fixed " 1820 "device initialization. This can be fixed "
1821 "using the \"btrfs balance\" command."); 1821 "using the \"btrfs balance\" command.");
1822 trans = btrfs_attach_transaction(root);
1823 if (IS_ERR(trans)) {
1824 if (PTR_ERR(trans) == -ENOENT)
1825 return 0;
1826 return PTR_ERR(trans);
1827 }
1828 ret = btrfs_commit_transaction(trans, root);
1822 } 1829 }
1823 1830
1824 return ret; 1831 return ret;
diff --git a/fs/char_dev.c b/fs/char_dev.c
index 3f152b92a94a..afc2bb691780 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -471,9 +471,19 @@ static int exact_lock(dev_t dev, void *data)
471 */ 471 */
472int cdev_add(struct cdev *p, dev_t dev, unsigned count) 472int cdev_add(struct cdev *p, dev_t dev, unsigned count)
473{ 473{
474 int error;
475
474 p->dev = dev; 476 p->dev = dev;
475 p->count = count; 477 p->count = count;
476 return kobj_map(cdev_map, dev, count, NULL, exact_match, exact_lock, p); 478
479 error = kobj_map(cdev_map, dev, count, NULL,
480 exact_match, exact_lock, p);
481 if (error)
482 return error;
483
484 kobject_get(p->kobj.parent);
485
486 return 0;
477} 487}
478 488
479static void cdev_unmap(dev_t dev, unsigned count) 489static void cdev_unmap(dev_t dev, unsigned count)
@@ -498,14 +508,20 @@ void cdev_del(struct cdev *p)
498static void cdev_default_release(struct kobject *kobj) 508static void cdev_default_release(struct kobject *kobj)
499{ 509{
500 struct cdev *p = container_of(kobj, struct cdev, kobj); 510 struct cdev *p = container_of(kobj, struct cdev, kobj);
511 struct kobject *parent = kobj->parent;
512
501 cdev_purge(p); 513 cdev_purge(p);
514 kobject_put(parent);
502} 515}
503 516
504static void cdev_dynamic_release(struct kobject *kobj) 517static void cdev_dynamic_release(struct kobject *kobj)
505{ 518{
506 struct cdev *p = container_of(kobj, struct cdev, kobj); 519 struct cdev *p = container_of(kobj, struct cdev, kobj);
520 struct kobject *parent = kobj->parent;
521
507 cdev_purge(p); 522 cdev_purge(p);
508 kfree(p); 523 kfree(p);
524 kobject_put(parent);
509} 525}
510 526
511static struct kobj_type ktype_cdev_default = { 527static struct kobj_type ktype_cdev_default = {
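As a reading aid (an illustrative restatement, not additional patch text), the char_dev.c change boils down to one ownership rule: cdev_add() takes a reference on the cdev's parent kobject once kobj_map() succeeds, and both release callbacks drop that reference, so the structure embedding the cdev cannot be freed while the cdev is still mapped. The example_* names below are hypothetical stand-ins for the functions modified above:

	int example_cdev_add(struct cdev *p, dev_t dev, unsigned count)
	{
		int error;

		p->dev = dev;
		p->count = count;
		error = kobj_map(cdev_map, dev, count, NULL,
				 exact_match, exact_lock, p);
		if (error)
			return error;
		kobject_get(p->kobj.parent);	/* pin the owning device */
		return 0;
	}

	void example_cdev_release(struct kobject *kobj)
	{
		struct kobject *parent = kobj->parent;

		cdev_purge(container_of(kobj, struct cdev, kobj));
		kobject_put(parent);		/* drop the pin taken at add time */
	}
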
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index f5054025f9da..4c6285fff598 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
210 210
211 err = get_user(palp, &up->palette); 211 err = get_user(palp, &up->palette);
212 err |= get_user(length, &up->length); 212 err |= get_user(length, &up->length);
213 if (err)
214 return -EFAULT;
213 215
214 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette)); 216 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
215 err = put_user(compat_ptr(palp), &up_native->palette); 217 err = put_user(compat_ptr(palp), &up_native->palette);
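A short aside (not part of the patch): the compat_ioctl.c hunk closes the usual get_user() pitfall of OR-ing return values together and then never testing the result, by bailing out with -EFAULT before the values are used. The fixed shape is simply:

	err = get_user(palp, &up->palette);
	err |= get_user(length, &up->length);
	if (err)
		return -EFAULT;		/* don't trust palp/length after a fault */
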
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 1b5089067d01..cf1821784a16 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -174,8 +174,7 @@ void ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
174 ext4_free_inodes_set(sb, gdp, 0); 174 ext4_free_inodes_set(sb, gdp, 0);
175 ext4_itable_unused_set(sb, gdp, 0); 175 ext4_itable_unused_set(sb, gdp, 0);
176 memset(bh->b_data, 0xff, sb->s_blocksize); 176 memset(bh->b_data, 0xff, sb->s_blocksize);
177 ext4_block_bitmap_csum_set(sb, block_group, gdp, bh, 177 ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
178 EXT4_BLOCKS_PER_GROUP(sb) / 8);
179 return; 178 return;
180 } 179 }
181 memset(bh->b_data, 0, sb->s_blocksize); 180 memset(bh->b_data, 0, sb->s_blocksize);
@@ -212,8 +211,7 @@ void ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
212 */ 211 */
213 ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group), 212 ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
214 sb->s_blocksize * 8, bh->b_data); 213 sb->s_blocksize * 8, bh->b_data);
215 ext4_block_bitmap_csum_set(sb, block_group, gdp, bh, 214 ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
216 EXT4_BLOCKS_PER_GROUP(sb) / 8);
217 ext4_group_desc_csum_set(sb, block_group, gdp); 215 ext4_group_desc_csum_set(sb, block_group, gdp);
218} 216}
219 217
@@ -350,7 +348,7 @@ void ext4_validate_block_bitmap(struct super_block *sb,
350 return; 348 return;
351 } 349 }
352 if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group, 350 if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
353 desc, bh, EXT4_BLOCKS_PER_GROUP(sb) / 8))) { 351 desc, bh))) {
354 ext4_unlock_group(sb, block_group); 352 ext4_unlock_group(sb, block_group);
355 ext4_error(sb, "bg %u: bad block bitmap checksum", block_group); 353 ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
356 return; 354 return;
diff --git a/fs/ext4/bitmap.c b/fs/ext4/bitmap.c
index 5c2d1813ebe9..3285aa5a706a 100644
--- a/fs/ext4/bitmap.c
+++ b/fs/ext4/bitmap.c
@@ -58,11 +58,12 @@ void ext4_inode_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
58 58
59int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group, 59int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
60 struct ext4_group_desc *gdp, 60 struct ext4_group_desc *gdp,
61 struct buffer_head *bh, int sz) 61 struct buffer_head *bh)
62{ 62{
63 __u32 hi; 63 __u32 hi;
64 __u32 provided, calculated; 64 __u32 provided, calculated;
65 struct ext4_sb_info *sbi = EXT4_SB(sb); 65 struct ext4_sb_info *sbi = EXT4_SB(sb);
66 int sz = EXT4_CLUSTERS_PER_GROUP(sb) / 8;
66 67
67 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, 68 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
68 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) 69 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
@@ -84,8 +85,9 @@ int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
84 85
85void ext4_block_bitmap_csum_set(struct super_block *sb, ext4_group_t group, 86void ext4_block_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
86 struct ext4_group_desc *gdp, 87 struct ext4_group_desc *gdp,
87 struct buffer_head *bh, int sz) 88 struct buffer_head *bh)
88{ 89{
90 int sz = EXT4_CLUSTERS_PER_GROUP(sb) / 8;
89 __u32 csum; 91 __u32 csum;
90 struct ext4_sb_info *sbi = EXT4_SB(sb); 92 struct ext4_sb_info *sbi = EXT4_SB(sb);
91 93
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 3ab2539b7b2e..3c20de1d59d0 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1882,10 +1882,10 @@ int ext4_inode_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
1882 struct buffer_head *bh, int sz); 1882 struct buffer_head *bh, int sz);
1883void ext4_block_bitmap_csum_set(struct super_block *sb, ext4_group_t group, 1883void ext4_block_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
1884 struct ext4_group_desc *gdp, 1884 struct ext4_group_desc *gdp,
1885 struct buffer_head *bh, int sz); 1885 struct buffer_head *bh);
1886int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group, 1886int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
1887 struct ext4_group_desc *gdp, 1887 struct ext4_group_desc *gdp,
1888 struct buffer_head *bh, int sz); 1888 struct buffer_head *bh);
1889 1889
1890/* balloc.c */ 1890/* balloc.c */
1891extern void ext4_validate_block_bitmap(struct super_block *sb, 1891extern void ext4_validate_block_bitmap(struct super_block *sb,
@@ -2063,8 +2063,7 @@ extern int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count);
2063extern int ext4_calculate_overhead(struct super_block *sb); 2063extern int ext4_calculate_overhead(struct super_block *sb);
2064extern int ext4_superblock_csum_verify(struct super_block *sb, 2064extern int ext4_superblock_csum_verify(struct super_block *sb,
2065 struct ext4_super_block *es); 2065 struct ext4_super_block *es);
2066extern void ext4_superblock_csum_set(struct super_block *sb, 2066extern void ext4_superblock_csum_set(struct super_block *sb);
2067 struct ext4_super_block *es);
2068extern void *ext4_kvmalloc(size_t size, gfp_t flags); 2067extern void *ext4_kvmalloc(size_t size, gfp_t flags);
2069extern void *ext4_kvzalloc(size_t size, gfp_t flags); 2068extern void *ext4_kvzalloc(size_t size, gfp_t flags);
2070extern void ext4_kvfree(void *ptr); 2069extern void ext4_kvfree(void *ptr);
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index bfa65b49d424..b4323ba846b5 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -143,17 +143,13 @@ int __ext4_handle_dirty_super(const char *where, unsigned int line,
143 struct buffer_head *bh = EXT4_SB(sb)->s_sbh; 143 struct buffer_head *bh = EXT4_SB(sb)->s_sbh;
144 int err = 0; 144 int err = 0;
145 145
146 ext4_superblock_csum_set(sb);
146 if (ext4_handle_valid(handle)) { 147 if (ext4_handle_valid(handle)) {
147 ext4_superblock_csum_set(sb,
148 (struct ext4_super_block *)bh->b_data);
149 err = jbd2_journal_dirty_metadata(handle, bh); 148 err = jbd2_journal_dirty_metadata(handle, bh);
150 if (err) 149 if (err)
151 ext4_journal_abort_handle(where, line, __func__, 150 ext4_journal_abort_handle(where, line, __func__,
152 bh, handle, err); 151 bh, handle, err);
153 } else { 152 } else
154 ext4_superblock_csum_set(sb,
155 (struct ext4_super_block *)bh->b_data);
156 mark_buffer_dirty(bh); 153 mark_buffer_dirty(bh);
157 }
158 return err; 154 return err;
159} 155}
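For orientation (an illustrative sketch, not patch text): with ext4_superblock_csum_set() now taking only the superblock, __ext4_handle_dirty_super() can compute the checksum once, before deciding whether the update goes through the journal or straight to a dirty buffer. The simplified control flow is:

	ext4_superblock_csum_set(sb);		/* checksum once, up front */
	if (ext4_handle_valid(handle)) {
		err = jbd2_journal_dirty_metadata(handle, bh);
		if (err)
			ext4_journal_abort_handle(where, line, __func__,
						  bh, handle, err);
	} else
		mark_buffer_dirty(bh);
	return err;
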
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 1c94cca35ed1..7011ac967208 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -52,6 +52,9 @@
52#define EXT4_EXT_MARK_UNINIT1 0x2 /* mark first half uninitialized */ 52#define EXT4_EXT_MARK_UNINIT1 0x2 /* mark first half uninitialized */
53#define EXT4_EXT_MARK_UNINIT2 0x4 /* mark second half uninitialized */ 53#define EXT4_EXT_MARK_UNINIT2 0x4 /* mark second half uninitialized */
54 54
55#define EXT4_EXT_DATA_VALID1 0x8 /* first half contains valid data */
56#define EXT4_EXT_DATA_VALID2 0x10 /* second half contains valid data */
57
55static __le32 ext4_extent_block_csum(struct inode *inode, 58static __le32 ext4_extent_block_csum(struct inode *inode,
56 struct ext4_extent_header *eh) 59 struct ext4_extent_header *eh)
57{ 60{
@@ -2914,6 +2917,9 @@ static int ext4_split_extent_at(handle_t *handle,
2914 unsigned int ee_len, depth; 2917 unsigned int ee_len, depth;
2915 int err = 0; 2918 int err = 0;
2916 2919
2920 BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
2921 (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));
2922
2917 ext_debug("ext4_split_extents_at: inode %lu, logical" 2923 ext_debug("ext4_split_extents_at: inode %lu, logical"
2918 "block %llu\n", inode->i_ino, (unsigned long long)split); 2924 "block %llu\n", inode->i_ino, (unsigned long long)split);
2919 2925
@@ -2972,7 +2978,14 @@ static int ext4_split_extent_at(handle_t *handle,
2972 2978
2973 err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); 2979 err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
2974 if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) { 2980 if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
2975 err = ext4_ext_zeroout(inode, &orig_ex); 2981 if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
2982 if (split_flag & EXT4_EXT_DATA_VALID1)
2983 err = ext4_ext_zeroout(inode, ex2);
2984 else
2985 err = ext4_ext_zeroout(inode, ex);
2986 } else
2987 err = ext4_ext_zeroout(inode, &orig_ex);
2988
2976 if (err) 2989 if (err)
2977 goto fix_extent_len; 2990 goto fix_extent_len;
2978 /* update the extent length and mark as initialized */ 2991 /* update the extent length and mark as initialized */
@@ -3025,12 +3038,13 @@ static int ext4_split_extent(handle_t *handle,
3025 uninitialized = ext4_ext_is_uninitialized(ex); 3038 uninitialized = ext4_ext_is_uninitialized(ex);
3026 3039
3027 if (map->m_lblk + map->m_len < ee_block + ee_len) { 3040 if (map->m_lblk + map->m_len < ee_block + ee_len) {
3028 split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ? 3041 split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
3029 EXT4_EXT_MAY_ZEROOUT : 0;
3030 flags1 = flags | EXT4_GET_BLOCKS_PRE_IO; 3042 flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
3031 if (uninitialized) 3043 if (uninitialized)
3032 split_flag1 |= EXT4_EXT_MARK_UNINIT1 | 3044 split_flag1 |= EXT4_EXT_MARK_UNINIT1 |
3033 EXT4_EXT_MARK_UNINIT2; 3045 EXT4_EXT_MARK_UNINIT2;
3046 if (split_flag & EXT4_EXT_DATA_VALID2)
3047 split_flag1 |= EXT4_EXT_DATA_VALID1;
3034 err = ext4_split_extent_at(handle, inode, path, 3048 err = ext4_split_extent_at(handle, inode, path,
3035 map->m_lblk + map->m_len, split_flag1, flags1); 3049 map->m_lblk + map->m_len, split_flag1, flags1);
3036 if (err) 3050 if (err)
@@ -3043,8 +3057,8 @@ static int ext4_split_extent(handle_t *handle,
3043 return PTR_ERR(path); 3057 return PTR_ERR(path);
3044 3058
3045 if (map->m_lblk >= ee_block) { 3059 if (map->m_lblk >= ee_block) {
3046 split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ? 3060 split_flag1 = split_flag & (EXT4_EXT_MAY_ZEROOUT |
3047 EXT4_EXT_MAY_ZEROOUT : 0; 3061 EXT4_EXT_DATA_VALID2);
3048 if (uninitialized) 3062 if (uninitialized)
3049 split_flag1 |= EXT4_EXT_MARK_UNINIT1; 3063 split_flag1 |= EXT4_EXT_MARK_UNINIT1;
3050 if (split_flag & EXT4_EXT_MARK_UNINIT2) 3064 if (split_flag & EXT4_EXT_MARK_UNINIT2)
@@ -3323,26 +3337,47 @@ static int ext4_split_unwritten_extents(handle_t *handle,
3323 3337
3324 split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0; 3338 split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
3325 split_flag |= EXT4_EXT_MARK_UNINIT2; 3339 split_flag |= EXT4_EXT_MARK_UNINIT2;
3326 3340 if (flags & EXT4_GET_BLOCKS_CONVERT)
3341 split_flag |= EXT4_EXT_DATA_VALID2;
3327 flags |= EXT4_GET_BLOCKS_PRE_IO; 3342 flags |= EXT4_GET_BLOCKS_PRE_IO;
3328 return ext4_split_extent(handle, inode, path, map, split_flag, flags); 3343 return ext4_split_extent(handle, inode, path, map, split_flag, flags);
3329} 3344}
3330 3345
3331static int ext4_convert_unwritten_extents_endio(handle_t *handle, 3346static int ext4_convert_unwritten_extents_endio(handle_t *handle,
3332 struct inode *inode, 3347 struct inode *inode,
3333 struct ext4_ext_path *path) 3348 struct ext4_map_blocks *map,
3349 struct ext4_ext_path *path)
3334{ 3350{
3335 struct ext4_extent *ex; 3351 struct ext4_extent *ex;
3352 ext4_lblk_t ee_block;
3353 unsigned int ee_len;
3336 int depth; 3354 int depth;
3337 int err = 0; 3355 int err = 0;
3338 3356
3339 depth = ext_depth(inode); 3357 depth = ext_depth(inode);
3340 ex = path[depth].p_ext; 3358 ex = path[depth].p_ext;
3359 ee_block = le32_to_cpu(ex->ee_block);
3360 ee_len = ext4_ext_get_actual_len(ex);
3341 3361
3342 ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical" 3362 ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical"
3343 "block %llu, max_blocks %u\n", inode->i_ino, 3363 "block %llu, max_blocks %u\n", inode->i_ino,
3344 (unsigned long long)le32_to_cpu(ex->ee_block), 3364 (unsigned long long)ee_block, ee_len);
3345 ext4_ext_get_actual_len(ex)); 3365
3366 /* If extent is larger than requested then split is required */
3367 if (ee_block != map->m_lblk || ee_len > map->m_len) {
3368 err = ext4_split_unwritten_extents(handle, inode, map, path,
3369 EXT4_GET_BLOCKS_CONVERT);
3370 if (err < 0)
3371 goto out;
3372 ext4_ext_drop_refs(path);
3373 path = ext4_ext_find_extent(inode, map->m_lblk, path);
3374 if (IS_ERR(path)) {
3375 err = PTR_ERR(path);
3376 goto out;
3377 }
3378 depth = ext_depth(inode);
3379 ex = path[depth].p_ext;
3380 }
3346 3381
3347 err = ext4_ext_get_access(handle, inode, path + depth); 3382 err = ext4_ext_get_access(handle, inode, path + depth);
3348 if (err) 3383 if (err)
@@ -3652,7 +3687,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3652 } 3687 }
3653 /* IO end_io complete, convert the filled extent to written */ 3688 /* IO end_io complete, convert the filled extent to written */
3654 if ((flags & EXT4_GET_BLOCKS_CONVERT)) { 3689 if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
3655 ret = ext4_convert_unwritten_extents_endio(handle, inode, 3690 ret = ext4_convert_unwritten_extents_endio(handle, inode, map,
3656 path); 3691 path);
3657 if (ret >= 0) { 3692 if (ret >= 0) {
3658 ext4_update_inode_fsync_trans(handle, inode, 1); 3693 ext4_update_inode_fsync_trans(handle, inode, 1);
@@ -4428,6 +4463,9 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
4428 */ 4463 */
4429 if (len <= EXT_UNINIT_MAX_LEN << blkbits) 4464 if (len <= EXT_UNINIT_MAX_LEN << blkbits)
4430 flags |= EXT4_GET_BLOCKS_NO_NORMALIZE; 4465 flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
4466
4467 /* Prevent race condition between unwritten */
4468 ext4_flush_unwritten_io(inode);
4431retry: 4469retry:
4432 while (ret >= 0 && ret < max_blocks) { 4470 while (ret >= 0 && ret < max_blocks) {
4433 map.m_lblk = map.m_lblk + ret; 4471 map.m_lblk = map.m_lblk + ret;
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index fa36372f3fdf..4facdd29a350 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -762,9 +762,7 @@ got:
762 ext4_free_group_clusters_set(sb, gdp, 762 ext4_free_group_clusters_set(sb, gdp,
763 ext4_free_clusters_after_init(sb, group, gdp)); 763 ext4_free_clusters_after_init(sb, group, gdp));
764 ext4_block_bitmap_csum_set(sb, group, gdp, 764 ext4_block_bitmap_csum_set(sb, group, gdp,
765 block_bitmap_bh, 765 block_bitmap_bh);
766 EXT4_BLOCKS_PER_GROUP(sb) /
767 8);
768 ext4_group_desc_csum_set(sb, group, gdp); 766 ext4_group_desc_csum_set(sb, group, gdp);
769 } 767 }
770 ext4_unlock_group(sb, group); 768 ext4_unlock_group(sb, group);
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index f8b27bf80aca..526e55358606 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2805,8 +2805,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
2805 } 2805 }
2806 len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len; 2806 len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
2807 ext4_free_group_clusters_set(sb, gdp, len); 2807 ext4_free_group_clusters_set(sb, gdp, len);
2808 ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh, 2808 ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh);
2809 EXT4_BLOCKS_PER_GROUP(sb) / 8);
2810 ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp); 2809 ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);
2811 2810
2812 ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 2811 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
@@ -4666,8 +4665,7 @@ do_more:
4666 4665
4667 ret = ext4_free_group_clusters(sb, gdp) + count_clusters; 4666 ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
4668 ext4_free_group_clusters_set(sb, gdp, ret); 4667 ext4_free_group_clusters_set(sb, gdp, ret);
4669 ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh, 4668 ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh);
4670 EXT4_BLOCKS_PER_GROUP(sb) / 8);
4671 ext4_group_desc_csum_set(sb, block_group, gdp); 4669 ext4_group_desc_csum_set(sb, block_group, gdp);
4672 ext4_unlock_group(sb, block_group); 4670 ext4_unlock_group(sb, block_group);
4673 percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters); 4671 percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters);
@@ -4811,8 +4809,7 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
4811 mb_free_blocks(NULL, &e4b, bit, count); 4809 mb_free_blocks(NULL, &e4b, bit, count);
4812 blk_free_count = blocks_freed + ext4_free_group_clusters(sb, desc); 4810 blk_free_count = blocks_freed + ext4_free_group_clusters(sb, desc);
4813 ext4_free_group_clusters_set(sb, desc, blk_free_count); 4811 ext4_free_group_clusters_set(sb, desc, blk_free_count);
4814 ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh, 4812 ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh);
4815 EXT4_BLOCKS_PER_GROUP(sb) / 8);
4816 ext4_group_desc_csum_set(sb, block_group, desc); 4813 ext4_group_desc_csum_set(sb, block_group, desc);
4817 ext4_unlock_group(sb, block_group); 4814 ext4_unlock_group(sb, block_group);
4818 percpu_counter_add(&sbi->s_freeclusters_counter, 4815 percpu_counter_add(&sbi->s_freeclusters_counter,
@@ -4993,8 +4990,9 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
4993 minlen = EXT4_NUM_B2C(EXT4_SB(sb), 4990 minlen = EXT4_NUM_B2C(EXT4_SB(sb),
4994 range->minlen >> sb->s_blocksize_bits); 4991 range->minlen >> sb->s_blocksize_bits);
4995 4992
4996 if (unlikely(minlen > EXT4_CLUSTERS_PER_GROUP(sb)) || 4993 if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
4997 unlikely(start >= max_blks)) 4994 start >= max_blks ||
4995 range->len < sb->s_blocksize)
4998 return -EINVAL; 4996 return -EINVAL;
4999 if (end >= max_blks) 4997 if (end >= max_blks)
5000 end = max_blks - 1; 4998 end = max_blks - 1;
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 7a75e1086961..47bf06a2765d 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -1212,8 +1212,7 @@ static int ext4_set_bitmap_checksums(struct super_block *sb,
1212 bh = ext4_get_bitmap(sb, group_data->block_bitmap); 1212 bh = ext4_get_bitmap(sb, group_data->block_bitmap);
1213 if (!bh) 1213 if (!bh)
1214 return -EIO; 1214 return -EIO;
1215 ext4_block_bitmap_csum_set(sb, group, gdp, bh, 1215 ext4_block_bitmap_csum_set(sb, group, gdp, bh);
1216 EXT4_BLOCKS_PER_GROUP(sb) / 8);
1217 brelse(bh); 1216 brelse(bh);
1218 1217
1219 return 0; 1218 return 0;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 7265a0367476..80928f716850 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -143,9 +143,10 @@ int ext4_superblock_csum_verify(struct super_block *sb,
143 return es->s_checksum == ext4_superblock_csum(sb, es); 143 return es->s_checksum == ext4_superblock_csum(sb, es);
144} 144}
145 145
146void ext4_superblock_csum_set(struct super_block *sb, 146void ext4_superblock_csum_set(struct super_block *sb)
147 struct ext4_super_block *es)
148{ 147{
148 struct ext4_super_block *es = EXT4_SB(sb)->s_es;
149
149 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, 150 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
150 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) 151 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
151 return; 152 return;
@@ -1963,7 +1964,7 @@ static int ext4_fill_flex_info(struct super_block *sb)
1963 sbi->s_log_groups_per_flex = 0; 1964 sbi->s_log_groups_per_flex = 0;
1964 return 1; 1965 return 1;
1965 } 1966 }
1966 groups_per_flex = 1 << sbi->s_log_groups_per_flex; 1967 groups_per_flex = 1U << sbi->s_log_groups_per_flex;
1967 1968
1968 err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count); 1969 err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count);
1969 if (err) 1970 if (err)
@@ -4381,7 +4382,7 @@ static int ext4_commit_super(struct super_block *sb, int sync)
4381 cpu_to_le32(percpu_counter_sum_positive( 4382 cpu_to_le32(percpu_counter_sum_positive(
4382 &EXT4_SB(sb)->s_freeinodes_counter)); 4383 &EXT4_SB(sb)->s_freeinodes_counter));
4383 BUFFER_TRACE(sbh, "marking dirty"); 4384 BUFFER_TRACE(sbh, "marking dirty");
4384 ext4_superblock_csum_set(sb, es); 4385 ext4_superblock_csum_set(sb);
4385 mark_buffer_dirty(sbh); 4386 mark_buffer_dirty(sbh);
4386 if (sync) { 4387 if (sync) {
4387 error = sync_dirty_buffer(sbh); 4388 error = sync_dirty_buffer(sbh);
diff --git a/fs/jfs/jfs_discard.c b/fs/jfs/jfs_discard.c
index 9947563e4175..dfcd50304559 100644
--- a/fs/jfs/jfs_discard.c
+++ b/fs/jfs/jfs_discard.c
@@ -83,7 +83,7 @@ int jfs_ioc_trim(struct inode *ip, struct fstrim_range *range)
83 struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap; 83 struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
84 struct super_block *sb = ipbmap->i_sb; 84 struct super_block *sb = ipbmap->i_sb;
85 int agno, agno_end; 85 int agno, agno_end;
86 s64 start, end, minlen; 86 u64 start, end, minlen;
87 u64 trimmed = 0; 87 u64 trimmed = 0;
88 88
89 /** 89 /**
@@ -93,15 +93,19 @@ int jfs_ioc_trim(struct inode *ip, struct fstrim_range *range)
93 * minlen: minimum extent length in Bytes 93 * minlen: minimum extent length in Bytes
94 */ 94 */
95 start = range->start >> sb->s_blocksize_bits; 95 start = range->start >> sb->s_blocksize_bits;
96 if (start < 0)
97 start = 0;
98 end = start + (range->len >> sb->s_blocksize_bits) - 1; 96 end = start + (range->len >> sb->s_blocksize_bits) - 1;
99 if (end >= bmp->db_mapsize)
100 end = bmp->db_mapsize - 1;
101 minlen = range->minlen >> sb->s_blocksize_bits; 97 minlen = range->minlen >> sb->s_blocksize_bits;
102 if (minlen <= 0) 98 if (minlen == 0)
103 minlen = 1; 99 minlen = 1;
104 100
101 if (minlen > bmp->db_agsize ||
102 start >= bmp->db_mapsize ||
103 range->len < sb->s_blocksize)
104 return -EINVAL;
105
106 if (end >= bmp->db_mapsize)
107 end = bmp->db_mapsize - 1;
108
105 /** 109 /**
106 * we trim all ag's within the range 110 * we trim all ag's within the range
107 */ 111 */
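As a reading aid (a sketch, not additional patch text), the jfs_ioc_trim() hunk stops silently clamping a bad FITRIM request and rejects it up front: a request whose minimum extent exceeds an allocation group, whose start lies beyond the block map, or whose length is smaller than one block now fails with -EINVAL, and only then is the end clamped:

	start = range->start >> sb->s_blocksize_bits;
	end = start + (range->len >> sb->s_blocksize_bits) - 1;
	minlen = range->minlen >> sb->s_blocksize_bits;
	if (minlen == 0)
		minlen = 1;

	if (minlen > bmp->db_agsize ||		/* can never be satisfied   */
	    start >= bmp->db_mapsize ||		/* starts past the fs       */
	    range->len < sb->s_blocksize)	/* shorter than one block   */
		return -EINVAL;

	if (end >= bmp->db_mapsize)
		end = bmp->db_mapsize - 1;
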
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index e4fb3ba5a58a..3d7e09bcc0e9 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -85,29 +85,38 @@ static struct rpc_clnt *nsm_create(struct net *net)
85 return rpc_create(&args); 85 return rpc_create(&args);
86} 86}
87 87
88static struct rpc_clnt *nsm_client_set(struct lockd_net *ln,
89 struct rpc_clnt *clnt)
90{
91 spin_lock(&ln->nsm_clnt_lock);
92 if (ln->nsm_users == 0) {
93 if (clnt == NULL)
94 goto out;
95 ln->nsm_clnt = clnt;
96 }
97 clnt = ln->nsm_clnt;
98 ln->nsm_users++;
99out:
100 spin_unlock(&ln->nsm_clnt_lock);
101 return clnt;
102}
103
88static struct rpc_clnt *nsm_client_get(struct net *net) 104static struct rpc_clnt *nsm_client_get(struct net *net)
89{ 105{
90 static DEFINE_MUTEX(nsm_create_mutex); 106 struct rpc_clnt *clnt, *new;
91 struct rpc_clnt *clnt;
92 struct lockd_net *ln = net_generic(net, lockd_net_id); 107 struct lockd_net *ln = net_generic(net, lockd_net_id);
93 108
94 spin_lock(&ln->nsm_clnt_lock); 109 clnt = nsm_client_set(ln, NULL);
95 if (ln->nsm_users) { 110 if (clnt != NULL)
96 ln->nsm_users++;
97 clnt = ln->nsm_clnt;
98 spin_unlock(&ln->nsm_clnt_lock);
99 goto out; 111 goto out;
100 }
101 spin_unlock(&ln->nsm_clnt_lock);
102 112
103 mutex_lock(&nsm_create_mutex); 113 clnt = new = nsm_create(net);
104 clnt = nsm_create(net); 114 if (IS_ERR(clnt))
105 if (!IS_ERR(clnt)) { 115 goto out;
106 ln->nsm_clnt = clnt; 116
107 smp_wmb(); 117 clnt = nsm_client_set(ln, new);
108 ln->nsm_users = 1; 118 if (clnt != new)
109 } 119 rpc_shutdown_client(new);
110 mutex_unlock(&nsm_create_mutex);
111out: 120out:
112 return clnt; 121 return clnt;
113} 122}
@@ -115,18 +124,16 @@ out:
115static void nsm_client_put(struct net *net) 124static void nsm_client_put(struct net *net)
116{ 125{
117 struct lockd_net *ln = net_generic(net, lockd_net_id); 126 struct lockd_net *ln = net_generic(net, lockd_net_id);
118 struct rpc_clnt *clnt = ln->nsm_clnt; 127 struct rpc_clnt *clnt = NULL;
119 int shutdown = 0;
120 128
121 spin_lock(&ln->nsm_clnt_lock); 129 spin_lock(&ln->nsm_clnt_lock);
122 if (ln->nsm_users) { 130 ln->nsm_users--;
123 if (--ln->nsm_users) 131 if (ln->nsm_users == 0) {
124 ln->nsm_clnt = NULL; 132 clnt = ln->nsm_clnt;
125 shutdown = !ln->nsm_users; 133 ln->nsm_clnt = NULL;
126 } 134 }
127 spin_unlock(&ln->nsm_clnt_lock); 135 spin_unlock(&ln->nsm_clnt_lock);
128 136 if (clnt != NULL)
129 if (shutdown)
130 rpc_shutdown_client(clnt); 137 rpc_shutdown_client(clnt);
131} 138}
132 139
diff --git a/fs/namei.c b/fs/namei.c
index d1895f308156..937f9d50c84b 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -705,8 +705,8 @@ static inline void put_link(struct nameidata *nd, struct path *link, void *cooki
705 path_put(link); 705 path_put(link);
706} 706}
707 707
708int sysctl_protected_symlinks __read_mostly = 1; 708int sysctl_protected_symlinks __read_mostly = 0;
709int sysctl_protected_hardlinks __read_mostly = 1; 709int sysctl_protected_hardlinks __read_mostly = 0;
710 710
711/** 711/**
712 * may_follow_link - Check symlink following for unsafe situations 712 * may_follow_link - Check symlink following for unsafe situations
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 9a521fb39869..5088b57b078a 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -241,7 +241,7 @@ static int nfs_callback_start_svc(int minorversion, struct rpc_xprt *xprt,
241 svc_exit_thread(cb_info->rqst); 241 svc_exit_thread(cb_info->rqst);
242 cb_info->rqst = NULL; 242 cb_info->rqst = NULL;
243 cb_info->task = NULL; 243 cb_info->task = NULL;
244 return PTR_ERR(cb_info->task); 244 return ret;
245 } 245 }
246 dprintk("nfs_callback_up: service started\n"); 246 dprintk("nfs_callback_up: service started\n");
247 return 0; 247 return 0;
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index 52d847212066..2e45fd9c02a3 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -122,12 +122,21 @@ static void filelayout_reset_read(struct nfs_read_data *data)
122 } 122 }
123} 123}
124 124
125static void filelayout_fenceme(struct inode *inode, struct pnfs_layout_hdr *lo)
126{
127 if (!test_and_clear_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
128 return;
129 clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(inode)->flags);
130 pnfs_return_layout(inode);
131}
132
125static int filelayout_async_handle_error(struct rpc_task *task, 133static int filelayout_async_handle_error(struct rpc_task *task,
126 struct nfs4_state *state, 134 struct nfs4_state *state,
127 struct nfs_client *clp, 135 struct nfs_client *clp,
128 struct pnfs_layout_segment *lseg) 136 struct pnfs_layout_segment *lseg)
129{ 137{
130 struct inode *inode = lseg->pls_layout->plh_inode; 138 struct pnfs_layout_hdr *lo = lseg->pls_layout;
139 struct inode *inode = lo->plh_inode;
131 struct nfs_server *mds_server = NFS_SERVER(inode); 140 struct nfs_server *mds_server = NFS_SERVER(inode);
132 struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg); 141 struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg);
133 struct nfs_client *mds_client = mds_server->nfs_client; 142 struct nfs_client *mds_client = mds_server->nfs_client;
@@ -204,10 +213,8 @@ static int filelayout_async_handle_error(struct rpc_task *task,
204 dprintk("%s DS connection error %d\n", __func__, 213 dprintk("%s DS connection error %d\n", __func__,
205 task->tk_status); 214 task->tk_status);
206 nfs4_mark_deviceid_unavailable(devid); 215 nfs4_mark_deviceid_unavailable(devid);
207 clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(inode)->flags); 216 set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
208 _pnfs_return_layout(inode);
209 rpc_wake_up(&tbl->slot_tbl_waitq); 217 rpc_wake_up(&tbl->slot_tbl_waitq);
210 nfs4_ds_disconnect(clp);
211 /* fall through */ 218 /* fall through */
212 default: 219 default:
213reset: 220reset:
@@ -331,7 +338,9 @@ static void filelayout_read_count_stats(struct rpc_task *task, void *data)
331static void filelayout_read_release(void *data) 338static void filelayout_read_release(void *data)
332{ 339{
333 struct nfs_read_data *rdata = data; 340 struct nfs_read_data *rdata = data;
341 struct pnfs_layout_hdr *lo = rdata->header->lseg->pls_layout;
334 342
343 filelayout_fenceme(lo->plh_inode, lo);
335 nfs_put_client(rdata->ds_clp); 344 nfs_put_client(rdata->ds_clp);
336 rdata->header->mds_ops->rpc_release(data); 345 rdata->header->mds_ops->rpc_release(data);
337} 346}
@@ -429,7 +438,9 @@ static void filelayout_write_count_stats(struct rpc_task *task, void *data)
429static void filelayout_write_release(void *data) 438static void filelayout_write_release(void *data)
430{ 439{
431 struct nfs_write_data *wdata = data; 440 struct nfs_write_data *wdata = data;
441 struct pnfs_layout_hdr *lo = wdata->header->lseg->pls_layout;
432 442
443 filelayout_fenceme(lo->plh_inode, lo);
433 nfs_put_client(wdata->ds_clp); 444 nfs_put_client(wdata->ds_clp);
434 wdata->header->mds_ops->rpc_release(data); 445 wdata->header->mds_ops->rpc_release(data);
435} 446}
@@ -739,7 +750,7 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
739 goto out_err; 750 goto out_err;
740 751
741 if (fl->num_fh > 0) { 752 if (fl->num_fh > 0) {
742 fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *), 753 fl->fh_array = kcalloc(fl->num_fh, sizeof(fl->fh_array[0]),
743 gfp_flags); 754 gfp_flags);
744 if (!fl->fh_array) 755 if (!fl->fh_array)
745 goto out_err; 756 goto out_err;
diff --git a/fs/nfs/nfs4filelayout.h b/fs/nfs/nfs4filelayout.h
index dca47d786710..8c07241fe52b 100644
--- a/fs/nfs/nfs4filelayout.h
+++ b/fs/nfs/nfs4filelayout.h
@@ -149,6 +149,5 @@ extern void nfs4_fl_put_deviceid(struct nfs4_file_layout_dsaddr *dsaddr);
149extern void nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr); 149extern void nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr);
150struct nfs4_file_layout_dsaddr * 150struct nfs4_file_layout_dsaddr *
151filelayout_get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags); 151filelayout_get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags);
152void nfs4_ds_disconnect(struct nfs_client *clp);
153 152
154#endif /* FS_NFS_NFS4FILELAYOUT_H */ 153#endif /* FS_NFS_NFS4FILELAYOUT_H */
diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
index 3336d5eaf879..a8eaa9b7bb0f 100644
--- a/fs/nfs/nfs4filelayoutdev.c
+++ b/fs/nfs/nfs4filelayoutdev.c
@@ -149,28 +149,6 @@ _data_server_lookup_locked(const struct list_head *dsaddrs)
149} 149}
150 150
151/* 151/*
152 * Lookup DS by nfs_client pointer. Zero data server client pointer
153 */
154void nfs4_ds_disconnect(struct nfs_client *clp)
155{
156 struct nfs4_pnfs_ds *ds;
157 struct nfs_client *found = NULL;
158
159 dprintk("%s clp %p\n", __func__, clp);
160 spin_lock(&nfs4_ds_cache_lock);
161 list_for_each_entry(ds, &nfs4_data_server_cache, ds_node)
162 if (ds->ds_clp && ds->ds_clp == clp) {
163 found = ds->ds_clp;
164 ds->ds_clp = NULL;
165 }
166 spin_unlock(&nfs4_ds_cache_lock);
167 if (found) {
168 set_bit(NFS_CS_STOP_RENEW, &clp->cl_res_state);
169 nfs_put_client(clp);
170 }
171}
172
173/*
174 * Create an rpc connection to the nfs4_pnfs_ds data server 152 * Create an rpc connection to the nfs4_pnfs_ds data server
175 * Currently only supports IPv4 and IPv6 addresses 153 * Currently only supports IPv4 and IPv6 addresses
176 */ 154 */
diff --git a/fs/nfs/nfs4getroot.c b/fs/nfs/nfs4getroot.c
index 6a83780e0ce6..549462e5b9b0 100644
--- a/fs/nfs/nfs4getroot.c
+++ b/fs/nfs/nfs4getroot.c
@@ -5,6 +5,7 @@
5 5
6#include <linux/nfs_fs.h> 6#include <linux/nfs_fs.h>
7#include "nfs4_fs.h" 7#include "nfs4_fs.h"
8#include "internal.h"
8 9
9#define NFSDBG_FACILITY NFSDBG_CLIENT 10#define NFSDBG_FACILITY NFSDBG_CLIENT
10 11
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
index be731e6b7b9c..c6f990656f89 100644
--- a/fs/nfs/objlayout/objio_osd.c
+++ b/fs/nfs/objlayout/objio_osd.c
@@ -369,7 +369,7 @@ void objio_free_result(struct objlayout_io_res *oir)
369 kfree(objios); 369 kfree(objios);
370} 370}
371 371
372enum pnfs_osd_errno osd_pri_2_pnfs_err(enum osd_err_priority oep) 372static enum pnfs_osd_errno osd_pri_2_pnfs_err(enum osd_err_priority oep)
373{ 373{
374 switch (oep) { 374 switch (oep) {
375 case OSD_ERR_PRI_NO_ERROR: 375 case OSD_ERR_PRI_NO_ERROR:
@@ -574,7 +574,7 @@ static bool objio_pg_test(struct nfs_pageio_descriptor *pgio,
574 (unsigned long)pgio->pg_layout_private; 574 (unsigned long)pgio->pg_layout_private;
575} 575}
576 576
577void objio_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req) 577static void objio_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
578{ 578{
579 pnfs_generic_pg_init_read(pgio, req); 579 pnfs_generic_pg_init_read(pgio, req);
580 if (unlikely(pgio->pg_lseg == NULL)) 580 if (unlikely(pgio->pg_lseg == NULL))
@@ -604,7 +604,7 @@ static bool aligned_on_raid_stripe(u64 offset, struct ore_layout *layout,
604 return false; 604 return false;
605} 605}
606 606
607void objio_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req) 607static void objio_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
608{ 608{
609 unsigned long stripe_end = 0; 609 unsigned long stripe_end = 0;
610 u64 wb_size; 610 u64 wb_size;
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 2d722dba1111..dbf7bba52da0 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -62,6 +62,7 @@ enum {
62 NFS_LAYOUT_RW_FAILED, /* get rw layout failed stop trying */ 62 NFS_LAYOUT_RW_FAILED, /* get rw layout failed stop trying */
63 NFS_LAYOUT_BULK_RECALL, /* bulk recall affecting layout */ 63 NFS_LAYOUT_BULK_RECALL, /* bulk recall affecting layout */
64 NFS_LAYOUT_ROC, /* some lseg had roc bit set */ 64 NFS_LAYOUT_ROC, /* some lseg had roc bit set */
65 NFS_LAYOUT_RETURN, /* Return this layout ASAP */
65}; 66};
66 67
67enum layoutdriver_policy_flags { 68enum layoutdriver_policy_flags {
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 64c3b3172367..e296572c73ed 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -45,10 +45,13 @@ static cputime64_t get_iowait_time(int cpu)
45 45
46static u64 get_idle_time(int cpu) 46static u64 get_idle_time(int cpu)
47{ 47{
48 u64 idle, idle_time = get_cpu_idle_time_us(cpu, NULL); 48 u64 idle, idle_time = -1ULL;
49
50 if (cpu_online(cpu))
51 idle_time = get_cpu_idle_time_us(cpu, NULL);
49 52
50 if (idle_time == -1ULL) 53 if (idle_time == -1ULL)
51 /* !NO_HZ so we can rely on cpustat.idle */ 54 /* !NO_HZ or cpu offline so we can rely on cpustat.idle */
52 idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE]; 55 idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
53 else 56 else
54 idle = usecs_to_cputime64(idle_time); 57 idle = usecs_to_cputime64(idle_time);
@@ -58,10 +61,13 @@ static u64 get_idle_time(int cpu)
58 61
59static u64 get_iowait_time(int cpu) 62static u64 get_iowait_time(int cpu)
60{ 63{
61 u64 iowait, iowait_time = get_cpu_iowait_time_us(cpu, NULL); 64 u64 iowait, iowait_time = -1ULL;
65
66 if (cpu_online(cpu))
67 iowait_time = get_cpu_iowait_time_us(cpu, NULL);
62 68
63 if (iowait_time == -1ULL) 69 if (iowait_time == -1ULL)
64 /* !NO_HZ so we can rely on cpustat.iowait */ 70 /* !NO_HZ or cpu offline so we can rely on cpustat.iowait */
65 iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT]; 71 iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
66 else 72 else
67 iowait = usecs_to_cputime64(iowait_time); 73 iowait = usecs_to_cputime64(iowait_time);
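As an aside (a sketch, not patch text): both /proc/stat hunks apply the same guard — get_cpu_idle_time_us() and get_cpu_iowait_time_us() are only meaningful for online CPUs, so an offline CPU now falls back to the cpustat counters exactly as the !NO_HZ case does:

	u64 idle, idle_time = -1ULL;

	if (cpu_online(cpu))
		idle_time = get_cpu_idle_time_us(cpu, NULL);

	if (idle_time == -1ULL)
		/* !NO_HZ or cpu offline: rely on cpustat.idle */
		idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
	else
		idle = usecs_to_cputime64(idle_time);
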
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index c78bb997e2c6..af1cbaf535ed 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -205,6 +205,8 @@
205 {0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ 205 {0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
206 {0x1002, 0x678A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ 206 {0x1002, 0x678A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
207 {0x1002, 0x6790, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ 207 {0x1002, 0x6790, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
208 {0x1002, 0x6791, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
209 {0x1002, 0x6792, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
208 {0x1002, 0x6798, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ 210 {0x1002, 0x6798, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
209 {0x1002, 0x6799, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ 211 {0x1002, 0x6799, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
210 {0x1002, 0x679A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ 212 {0x1002, 0x679A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
@@ -217,6 +219,7 @@
217 {0x1002, 0x6808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ 219 {0x1002, 0x6808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
218 {0x1002, 0x6809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ 220 {0x1002, 0x6809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
219 {0x1002, 0x6810, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ 221 {0x1002, 0x6810, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
222 {0x1002, 0x6811, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
220 {0x1002, 0x6816, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ 223 {0x1002, 0x6816, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
221 {0x1002, 0x6817, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ 224 {0x1002, 0x6817, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
222 {0x1002, 0x6818, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ 225 {0x1002, 0x6818, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 569d67d4243e..d452ee191066 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -57,6 +57,7 @@ int memblock_add(phys_addr_t base, phys_addr_t size);
57int memblock_remove(phys_addr_t base, phys_addr_t size); 57int memblock_remove(phys_addr_t base, phys_addr_t size);
58int memblock_free(phys_addr_t base, phys_addr_t size); 58int memblock_free(phys_addr_t base, phys_addr_t size);
59int memblock_reserve(phys_addr_t base, phys_addr_t size); 59int memblock_reserve(phys_addr_t base, phys_addr_t size);
60void memblock_trim_memory(phys_addr_t align);
60 61
61#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 62#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
62void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, 63void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 2e902359aee5..6bfb2faa0b19 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -803,12 +803,16 @@ static inline void perf_event_task_tick(void) { }
803do { \ 803do { \
804 static struct notifier_block fn##_nb __cpuinitdata = \ 804 static struct notifier_block fn##_nb __cpuinitdata = \
805 { .notifier_call = fn, .priority = CPU_PRI_PERF }; \ 805 { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
806 unsigned long cpu = smp_processor_id(); \
807 unsigned long flags; \
806 fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \ 808 fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \
807 (void *)(unsigned long)smp_processor_id()); \ 809 (void *)(unsigned long)cpu); \
810 local_irq_save(flags); \
808 fn(&fn##_nb, (unsigned long)CPU_STARTING, \ 811 fn(&fn##_nb, (unsigned long)CPU_STARTING, \
809 (void *)(unsigned long)smp_processor_id()); \ 812 (void *)(unsigned long)cpu); \
813 local_irq_restore(flags); \
810 fn(&fn##_nb, (unsigned long)CPU_ONLINE, \ 814 fn(&fn##_nb, (unsigned long)CPU_ONLINE, \
811 (void *)(unsigned long)smp_processor_id()); \ 815 (void *)(unsigned long)cpu); \
812 register_cpu_notifier(&fn##_nb); \ 816 register_cpu_notifier(&fn##_nb); \
813} while (0) 817} while (0)
814 818
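For orientation (not additional patch text): the perf_cpu_notifier() change reads the CPU number once instead of on every invocation and runs the CPU_STARTING callback with interrupts disabled, matching how that notifier is invoked on a real hotplug path. With the macro token-pasting stripped away (fn_nb below is a hypothetical stand-in for fn##_nb), the expansion reduces to:

	unsigned long cpu = smp_processor_id();
	unsigned long flags;

	fn(&fn_nb, (unsigned long)CPU_UP_PREPARE, (void *)cpu);
	local_irq_save(flags);			/* CPU_STARTING runs irqs-off */
	fn(&fn_nb, (unsigned long)CPU_STARTING, (void *)cpu);
	local_irq_restore(flags);
	fn(&fn_nb, (unsigned long)CPU_ONLINE, (void *)cpu);
	register_cpu_notifier(&fn_nb);
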
diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
index 214caa33433b..2ac60c9cf644 100644
--- a/include/linux/rbtree_augmented.h
+++ b/include/linux/rbtree_augmented.h
@@ -24,6 +24,7 @@
24#ifndef _LINUX_RBTREE_AUGMENTED_H 24#ifndef _LINUX_RBTREE_AUGMENTED_H
25#define _LINUX_RBTREE_AUGMENTED_H 25#define _LINUX_RBTREE_AUGMENTED_H
26 26
27#include <linux/compiler.h>
27#include <linux/rbtree.h> 28#include <linux/rbtree.h>
28 29
29/* 30/*
diff --git a/include/linux/spi/tsc2005.h b/include/linux/spi/tsc2005.h
index d9b0c84220c7..8f721e465e05 100644
--- a/include/linux/spi/tsc2005.h
+++ b/include/linux/spi/tsc2005.h
@@ -3,8 +3,6 @@
3 * 3 *
4 * Copyright (C) 2009-2010 Nokia Corporation 4 * Copyright (C) 2009-2010 Nokia Corporation
5 * 5 *
6 * Contact: Aaro Koskinen <aaro.koskinen@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or 8 * the Free Software Foundation; either version 2 of the License, or
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index e6f0331e3d45..24594571c5a3 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -35,16 +35,6 @@ struct inode;
35# include <asm/uprobes.h> 35# include <asm/uprobes.h>
36#endif 36#endif
37 37
38/* flags that denote/change uprobes behaviour */
39
40/* Have a copy of original instruction */
41#define UPROBE_COPY_INSN 0x1
42
43/* Dont run handlers when first register/ last unregister in progress*/
44#define UPROBE_RUN_HANDLER 0x2
45/* Can skip singlestep */
46#define UPROBE_SKIP_SSTEP 0x4
47
48struct uprobe_consumer { 38struct uprobe_consumer {
49 int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs); 39 int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs);
50 /* 40 /*
@@ -59,7 +49,6 @@ struct uprobe_consumer {
59#ifdef CONFIG_UPROBES 49#ifdef CONFIG_UPROBES
60enum uprobe_task_state { 50enum uprobe_task_state {
61 UTASK_RUNNING, 51 UTASK_RUNNING,
62 UTASK_BP_HIT,
63 UTASK_SSTEP, 52 UTASK_SSTEP,
64 UTASK_SSTEP_ACK, 53 UTASK_SSTEP_ACK,
65 UTASK_SSTEP_TRAPPED, 54 UTASK_SSTEP_TRAPPED,
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index aecee9d112cb..694dcaf266e6 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -170,7 +170,7 @@ gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, phys_addr_t addr,
170 unmap->dev_bus_addr = 0; 170 unmap->dev_bus_addr = 0;
171} 171}
172 172
173int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes, 173int arch_gnttab_map_shared(xen_pfn_t *frames, unsigned long nr_gframes,
174 unsigned long max_nr_gframes, 174 unsigned long max_nr_gframes,
175 void **__shared); 175 void **__shared);
176int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes, 176int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
diff --git a/include/xen/interface/grant_table.h b/include/xen/interface/grant_table.h
index f9f8b975ae74..e40fae9bf11a 100644
--- a/include/xen/interface/grant_table.h
+++ b/include/xen/interface/grant_table.h
@@ -310,7 +310,7 @@ struct gnttab_setup_table {
310 uint32_t nr_frames; 310 uint32_t nr_frames;
311 /* OUT parameters. */ 311 /* OUT parameters. */
312 int16_t status; /* GNTST_* */ 312 int16_t status; /* GNTST_* */
313 GUEST_HANDLE(ulong) frame_list; 313 GUEST_HANDLE(xen_pfn_t) frame_list;
314}; 314};
315DEFINE_GUEST_HANDLE_STRUCT(gnttab_setup_table); 315DEFINE_GUEST_HANDLE_STRUCT(gnttab_setup_table);
316 316
diff --git a/include/xen/interface/memory.h b/include/xen/interface/memory.h
index b66d04ce6957..90712e2072d5 100644
--- a/include/xen/interface/memory.h
+++ b/include/xen/interface/memory.h
@@ -179,28 +179,8 @@ struct xen_add_to_physmap {
179}; 179};
180DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap); 180DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap);
181 181
182/* 182/*** REMOVED ***/
183 * Translates a list of domain-specific GPFNs into MFNs. Returns a -ve error 183/*#define XENMEM_translate_gpfn_list 8*/
184 * code on failure. This call only works for auto-translated guests.
185 */
186#define XENMEM_translate_gpfn_list 8
187struct xen_translate_gpfn_list {
188 /* Which domain to translate for? */
189 domid_t domid;
190
191 /* Length of list. */
192 xen_ulong_t nr_gpfns;
193
194 /* List of GPFNs to translate. */
195 GUEST_HANDLE(ulong) gpfn_list;
196
197 /*
198 * Output list to contain MFN translations. May be the same as the input
199 * list (in which case each input GPFN is overwritten with the output MFN).
200 */
201 GUEST_HANDLE(ulong) mfn_list;
202};
203DEFINE_GUEST_HANDLE_STRUCT(xen_translate_gpfn_list);
204 184
205/* 185/*
206 * Returns the pseudo-physical memory map as it was when the domain 186 * Returns the pseudo-physical memory map as it was when the domain
diff --git a/kernel/Makefile b/kernel/Makefile
index 0dfeca4324ee..86e3285ae7e5 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -174,10 +174,8 @@ signing_key.priv signing_key.x509: x509.genkey
174 @echo "###" 174 @echo "###"
175 @echo "### If this takes a long time, you might wish to run rngd in the" 175 @echo "### If this takes a long time, you might wish to run rngd in the"
176 @echo "### background to keep the supply of entropy topped up. It" 176 @echo "### background to keep the supply of entropy topped up. It"
177 @echo "### needs to be run as root, and should use a hardware random" 177 @echo "### needs to be run as root, and uses a hardware random"
178 @echo "### number generator if one is available, eg:" 178 @echo "### number generator if one is available."
179 @echo "###"
180 @echo "### rngd -r /dev/hwrandom"
181 @echo "###" 179 @echo "###"
182 openssl req -new -nodes -utf8 $(sign_key_with_hash) -days 36500 -batch \ 180 openssl req -new -nodes -utf8 $(sign_key_with_hash) -days 36500 -batch \
183 -x509 -config x509.genkey \ 181 -x509 -config x509.genkey \
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 13774b3b39aa..f24f724620dd 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1962,9 +1962,8 @@ static void cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
1962 * trading it for newcg is protected by cgroup_mutex, we're safe to drop 1962 * trading it for newcg is protected by cgroup_mutex, we're safe to drop
1963 * it here; it will be freed under RCU. 1963 * it here; it will be freed under RCU.
1964 */ 1964 */
1965 put_css_set(oldcg);
1966
1967 set_bit(CGRP_RELEASABLE, &oldcgrp->flags); 1965 set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
1966 put_css_set(oldcg);
1968} 1967}
1969 1968
1970/** 1969/**
@@ -4815,31 +4814,20 @@ static const struct file_operations proc_cgroupstats_operations = {
4815 * 4814 *
4816 * A pointer to the shared css_set was automatically copied in 4815 * A pointer to the shared css_set was automatically copied in
4817 * fork.c by dup_task_struct(). However, we ignore that copy, since 4816 * fork.c by dup_task_struct(). However, we ignore that copy, since
4818 * it was not made under the protection of RCU, cgroup_mutex or 4817 * it was not made under the protection of RCU or cgroup_mutex, so
4819 * threadgroup_change_begin(), so it might no longer be a valid 4818 * might no longer be a valid cgroup pointer. cgroup_attach_task() might
4820 * cgroup pointer. cgroup_attach_task() might have already changed 4819 * have already changed current->cgroups, allowing the previously
4821 * current->cgroups, allowing the previously referenced cgroup 4820 * referenced cgroup group to be removed and freed.
4822 * group to be removed and freed.
4823 *
4824 * Outside the pointer validity we also need to process the css_set
4825 * inheritance between threadgoup_change_begin() and
4826 * threadgoup_change_end(), this way there is no leak in any process
4827 * wide migration performed by cgroup_attach_proc() that could otherwise
4828 * miss a thread because it is too early or too late in the fork stage.
4829 * 4821 *
4830 * At the point that cgroup_fork() is called, 'current' is the parent 4822 * At the point that cgroup_fork() is called, 'current' is the parent
4831 * task, and the passed argument 'child' points to the child task. 4823 * task, and the passed argument 'child' points to the child task.
4832 */ 4824 */
4833void cgroup_fork(struct task_struct *child) 4825void cgroup_fork(struct task_struct *child)
4834{ 4826{
4835 /* 4827 task_lock(current);
4836 * We don't need to task_lock() current because current->cgroups
4837 * can't be changed concurrently here. The parent obviously hasn't
4838 * exited and called cgroup_exit(), and we are synchronized against
4839 * cgroup migration through threadgroup_change_begin().
4840 */
4841 child->cgroups = current->cgroups; 4828 child->cgroups = current->cgroups;
4842 get_css_set(child->cgroups); 4829 get_css_set(child->cgroups);
4830 task_unlock(current);
4843 INIT_LIST_HEAD(&child->cg_list); 4831 INIT_LIST_HEAD(&child->cg_list);
4844} 4832}
4845 4833
@@ -4895,19 +4883,10 @@ void cgroup_post_fork(struct task_struct *child)
4895 */ 4883 */
4896 if (use_task_css_set_links) { 4884 if (use_task_css_set_links) {
4897 write_lock(&css_set_lock); 4885 write_lock(&css_set_lock);
4898 if (list_empty(&child->cg_list)) { 4886 task_lock(child);
4899 /* 4887 if (list_empty(&child->cg_list))
4900 * It's safe to use child->cgroups without task_lock()
4901 * here because we are protected through
4902 * threadgroup_change_begin() against concurrent
4903 * css_set change in cgroup_task_migrate(). Also
4904 * the task can't exit at that point until
4905 * wake_up_new_task() is called, so we are protected
4906 * against cgroup_exit() setting child->cgroup to
4907 * init_css_set.
4908 */
4909 list_add(&child->cg_list, &child->cgroups->tasks); 4888 list_add(&child->cg_list, &child->cgroups->tasks);
4910 } 4889 task_unlock(child);
4911 write_unlock(&css_set_lock); 4890 write_unlock(&css_set_lock);
4912 } 4891 }
4913} 4892}
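As a reading aid (a sketch, not part of the patch): with the threadgroup_change_begin() reasoning removed from the comments, the fork-side hooks go back to taking task_lock() around every access to ->cgroups, which is the simpler locking rule the remaining comments describe:

	/* cgroup_fork(): copy the parent's css_set under task_lock() */
	task_lock(current);
	child->cgroups = current->cgroups;
	get_css_set(child->cgroups);
	task_unlock(current);
	INIT_LIST_HEAD(&child->cg_list);

	/* cgroup_post_fork(): link the child under both locks */
	write_lock(&css_set_lock);
	task_lock(child);
	if (list_empty(&child->cg_list))
		list_add(&child->cg_list, &child->cgroups->tasks);
	task_unlock(child);
	write_unlock(&css_set_lock);
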
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 98256bc71ee1..5cc4e7e42e68 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -78,15 +78,23 @@ static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
78 */ 78 */
79static atomic_t uprobe_events = ATOMIC_INIT(0); 79static atomic_t uprobe_events = ATOMIC_INIT(0);
80 80
81/* Have a copy of original instruction */
82#define UPROBE_COPY_INSN 0
83/* Dont run handlers when first register/ last unregister in progress*/
84#define UPROBE_RUN_HANDLER 1
85/* Can skip singlestep */
86#define UPROBE_SKIP_SSTEP 2
87
81struct uprobe { 88struct uprobe {
82 struct rb_node rb_node; /* node in the rb tree */ 89 struct rb_node rb_node; /* node in the rb tree */
83 atomic_t ref; 90 atomic_t ref;
84 struct rw_semaphore consumer_rwsem; 91 struct rw_semaphore consumer_rwsem;
92 struct mutex copy_mutex; /* TODO: kill me and UPROBE_COPY_INSN */
85 struct list_head pending_list; 93 struct list_head pending_list;
86 struct uprobe_consumer *consumers; 94 struct uprobe_consumer *consumers;
87 struct inode *inode; /* Also hold a ref to inode */ 95 struct inode *inode; /* Also hold a ref to inode */
88 loff_t offset; 96 loff_t offset;
89 int flags; 97 unsigned long flags;
90 struct arch_uprobe arch; 98 struct arch_uprobe arch;
91}; 99};
92 100
@@ -100,17 +108,12 @@ struct uprobe {
100 */ 108 */
101static bool valid_vma(struct vm_area_struct *vma, bool is_register) 109static bool valid_vma(struct vm_area_struct *vma, bool is_register)
102{ 110{
103 if (!vma->vm_file) 111 vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_SHARED;
104 return false;
105
106 if (!is_register)
107 return true;
108 112
109 if ((vma->vm_flags & (VM_HUGETLB|VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)) 113 if (is_register)
110 == (VM_READ|VM_EXEC)) 114 flags |= VM_WRITE;
111 return true;
112 115
113 return false; 116 return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
114} 117}
115 118
116static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset) 119static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
@@ -193,19 +196,44 @@ bool __weak is_swbp_insn(uprobe_opcode_t *insn)
193 return *insn == UPROBE_SWBP_INSN; 196 return *insn == UPROBE_SWBP_INSN;
194} 197}
195 198
199static void copy_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *opcode)
200{
201 void *kaddr = kmap_atomic(page);
202 memcpy(opcode, kaddr + (vaddr & ~PAGE_MASK), UPROBE_SWBP_INSN_SIZE);
203 kunmap_atomic(kaddr);
204}
205
206static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
207{
208 uprobe_opcode_t old_opcode;
209 bool is_swbp;
210
211 copy_opcode(page, vaddr, &old_opcode);
212 is_swbp = is_swbp_insn(&old_opcode);
213
214 if (is_swbp_insn(new_opcode)) {
215 if (is_swbp) /* register: already installed? */
216 return 0;
217 } else {
218 if (!is_swbp) /* unregister: was it changed by us? */
219 return 0;
220 }
221
222 return 1;
223}
224
196/* 225/*
197 * NOTE: 226 * NOTE:
198 * Expect the breakpoint instruction to be the smallest size instruction for 227 * Expect the breakpoint instruction to be the smallest size instruction for
199 * the architecture. If an arch has variable length instruction and the 228 * the architecture. If an arch has variable length instruction and the
200 * breakpoint instruction is not of the smallest length instruction 229 * breakpoint instruction is not of the smallest length instruction
201 * supported by that architecture then we need to modify read_opcode / 230 * supported by that architecture then we need to modify is_swbp_at_addr and
202 * write_opcode accordingly. This would never be a problem for archs that 231 * write_opcode accordingly. This would never be a problem for archs that
203 * have fixed length instructions. 232 * have fixed length instructions.
204 */ 233 */
205 234
206/* 235/*
207 * write_opcode - write the opcode at a given virtual address. 236 * write_opcode - write the opcode at a given virtual address.
208 * @auprobe: arch breakpointing information.
209 * @mm: the probed process address space. 237 * @mm: the probed process address space.
210 * @vaddr: the virtual address to store the opcode. 238 * @vaddr: the virtual address to store the opcode.
211 * @opcode: opcode to be written at @vaddr. 239 * @opcode: opcode to be written at @vaddr.
@@ -216,8 +244,8 @@ bool __weak is_swbp_insn(uprobe_opcode_t *insn)
216 * For mm @mm, write the opcode at @vaddr. 244 * For mm @mm, write the opcode at @vaddr.
217 * Return 0 (success) or a negative errno. 245 * Return 0 (success) or a negative errno.
218 */ 246 */
219static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm, 247static int write_opcode(struct mm_struct *mm, unsigned long vaddr,
220 unsigned long vaddr, uprobe_opcode_t opcode) 248 uprobe_opcode_t opcode)
221{ 249{
222 struct page *old_page, *new_page; 250 struct page *old_page, *new_page;
223 void *vaddr_old, *vaddr_new; 251 void *vaddr_old, *vaddr_new;
@@ -226,10 +254,14 @@ static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
226 254
227retry: 255retry:
228 /* Read the page with vaddr into memory */ 256 /* Read the page with vaddr into memory */
229 ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &old_page, &vma); 257 ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma);
230 if (ret <= 0) 258 if (ret <= 0)
231 return ret; 259 return ret;
232 260
261 ret = verify_opcode(old_page, vaddr, &opcode);
262 if (ret <= 0)
263 goto put_old;
264
233 ret = -ENOMEM; 265 ret = -ENOMEM;
234 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr); 266 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
235 if (!new_page) 267 if (!new_page)
@@ -264,63 +296,6 @@ put_old:
264} 296}
265 297
266/** 298/**
267 * read_opcode - read the opcode at a given virtual address.
268 * @mm: the probed process address space.
269 * @vaddr: the virtual address to read the opcode.
270 * @opcode: location to store the read opcode.
271 *
272 * Called with mm->mmap_sem held (for read and with a reference to
273 * mm.
274 *
275 * For mm @mm, read the opcode at @vaddr and store it in @opcode.
276 * Return 0 (success) or a negative errno.
277 */
278static int read_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t *opcode)
279{
280 struct page *page;
281 void *vaddr_new;
282 int ret;
283
284 ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &page, NULL);
285 if (ret <= 0)
286 return ret;
287
288 vaddr_new = kmap_atomic(page);
289 vaddr &= ~PAGE_MASK;
290 memcpy(opcode, vaddr_new + vaddr, UPROBE_SWBP_INSN_SIZE);
291 kunmap_atomic(vaddr_new);
292
293 put_page(page);
294
295 return 0;
296}
297
298static int is_swbp_at_addr(struct mm_struct *mm, unsigned long vaddr)
299{
300 uprobe_opcode_t opcode;
301 int result;
302
303 if (current->mm == mm) {
304 pagefault_disable();
305 result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
306 sizeof(opcode));
307 pagefault_enable();
308
309 if (likely(result == 0))
310 goto out;
311 }
312
313 result = read_opcode(mm, vaddr, &opcode);
314 if (result)
315 return result;
316out:
317 if (is_swbp_insn(&opcode))
318 return 1;
319
320 return 0;
321}
322
323/**
324 * set_swbp - store breakpoint at a given address. 299 * set_swbp - store breakpoint at a given address.
325 * @auprobe: arch specific probepoint information. 300 * @auprobe: arch specific probepoint information.
326 * @mm: the probed process address space. 301 * @mm: the probed process address space.
@@ -331,18 +306,7 @@ out:
331 */ 306 */
332int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr) 307int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
333{ 308{
334 int result; 309 return write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
335 /*
336 * See the comment near uprobes_hash().
337 */
338 result = is_swbp_at_addr(mm, vaddr);
339 if (result == 1)
340 return 0;
341
342 if (result)
343 return result;
344
345 return write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
346} 310}
347 311
348/** 312/**
@@ -357,16 +321,7 @@ int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned
357int __weak 321int __weak
358set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr) 322set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
359{ 323{
360 int result; 324 return write_opcode(mm, vaddr, *(uprobe_opcode_t *)auprobe->insn);
361
362 result = is_swbp_at_addr(mm, vaddr);
363 if (!result)
364 return -EINVAL;
365
366 if (result != 1)
367 return result;
368
369 return write_opcode(auprobe, mm, vaddr, *(uprobe_opcode_t *)auprobe->insn);
370} 325}
371 326
372static int match_uprobe(struct uprobe *l, struct uprobe *r) 327static int match_uprobe(struct uprobe *l, struct uprobe *r)
@@ -473,7 +428,7 @@ static struct uprobe *insert_uprobe(struct uprobe *uprobe)
473 spin_unlock(&uprobes_treelock); 428 spin_unlock(&uprobes_treelock);
474 429
475 /* For now assume that the instruction need not be single-stepped */ 430 /* For now assume that the instruction need not be single-stepped */
476 uprobe->flags |= UPROBE_SKIP_SSTEP; 431 __set_bit(UPROBE_SKIP_SSTEP, &uprobe->flags);
477 432
478 return u; 433 return u;
479} 434}
@@ -495,6 +450,7 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
495 uprobe->inode = igrab(inode); 450 uprobe->inode = igrab(inode);
496 uprobe->offset = offset; 451 uprobe->offset = offset;
497 init_rwsem(&uprobe->consumer_rwsem); 452 init_rwsem(&uprobe->consumer_rwsem);
453 mutex_init(&uprobe->copy_mutex);
498 454
499 /* add to uprobes_tree, sorted on inode:offset */ 455 /* add to uprobes_tree, sorted on inode:offset */
500 cur_uprobe = insert_uprobe(uprobe); 456 cur_uprobe = insert_uprobe(uprobe);
@@ -515,7 +471,7 @@ static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
515{ 471{
516 struct uprobe_consumer *uc; 472 struct uprobe_consumer *uc;
517 473
518 if (!(uprobe->flags & UPROBE_RUN_HANDLER)) 474 if (!test_bit(UPROBE_RUN_HANDLER, &uprobe->flags))
519 return; 475 return;
520 476
521 down_read(&uprobe->consumer_rwsem); 477 down_read(&uprobe->consumer_rwsem);
@@ -621,29 +577,43 @@ static int copy_insn(struct uprobe *uprobe, struct file *filp)
621 return __copy_insn(mapping, filp, uprobe->arch.insn, bytes, uprobe->offset); 577 return __copy_insn(mapping, filp, uprobe->arch.insn, bytes, uprobe->offset);
622} 578}
623 579
624/* 580static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
625 * How mm->uprobes_state.count gets updated 581 struct mm_struct *mm, unsigned long vaddr)
626 * uprobe_mmap() increments the count if 582{
627 * - it successfully adds a breakpoint. 583 int ret = 0;
628 * - it cannot add a breakpoint, but sees that there is a underlying 584
629 * breakpoint (via a is_swbp_at_addr()). 585 if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
630 * 586 return ret;
631 * uprobe_munmap() decrements the count if 587
632 * - it sees a underlying breakpoint, (via is_swbp_at_addr) 588 mutex_lock(&uprobe->copy_mutex);
633 * (Subsequent uprobe_unregister wouldnt find the breakpoint 589 if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
634 * unless a uprobe_mmap kicks in, since the old vma would be 590 goto out;
635 * dropped just after uprobe_munmap.) 591
636 * 592 ret = copy_insn(uprobe, file);
637 * uprobe_register increments the count if: 593 if (ret)
638 * - it successfully adds a breakpoint. 594 goto out;
639 * 595
640 * uprobe_unregister decrements the count if: 596 ret = -ENOTSUPP;
641 * - it sees a underlying breakpoint and removes successfully. 597 if (is_swbp_insn((uprobe_opcode_t *)uprobe->arch.insn))
642 * (via is_swbp_at_addr) 598 goto out;
643 * (Subsequent uprobe_munmap wouldnt find the breakpoint 599
644 * since there is no underlying breakpoint after the 600 ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
645 * breakpoint removal.) 601 if (ret)
646 */ 602 goto out;
603
604 /* write_opcode() assumes we don't cross page boundary */
605 BUG_ON((uprobe->offset & ~PAGE_MASK) +
606 UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
607
608 smp_wmb(); /* pairs with rmb() in find_active_uprobe() */
609 set_bit(UPROBE_COPY_INSN, &uprobe->flags);
610
611 out:
612 mutex_unlock(&uprobe->copy_mutex);
613
614 return ret;
615}
616
647static int 617static int
648install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, 618install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
649 struct vm_area_struct *vma, unsigned long vaddr) 619 struct vm_area_struct *vma, unsigned long vaddr)
@@ -661,24 +631,9 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
661 if (!uprobe->consumers) 631 if (!uprobe->consumers)
662 return 0; 632 return 0;
663 633
664 if (!(uprobe->flags & UPROBE_COPY_INSN)) { 634 ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
665 ret = copy_insn(uprobe, vma->vm_file); 635 if (ret)
666 if (ret) 636 return ret;
667 return ret;
668
669 if (is_swbp_insn((uprobe_opcode_t *)uprobe->arch.insn))
670 return -ENOTSUPP;
671
672 ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
673 if (ret)
674 return ret;
675
676 /* write_opcode() assumes we don't cross page boundary */
677 BUG_ON((uprobe->offset & ~PAGE_MASK) +
678 UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
679
680 uprobe->flags |= UPROBE_COPY_INSN;
681 }
682 637
683 /* 638 /*
684 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(), 639 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
@@ -697,15 +652,15 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
697 return ret; 652 return ret;
698} 653}
699 654
700static void 655static int
701remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr) 656remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
702{ 657{
703 /* can happen if uprobe_register() fails */ 658 /* can happen if uprobe_register() fails */
704 if (!test_bit(MMF_HAS_UPROBES, &mm->flags)) 659 if (!test_bit(MMF_HAS_UPROBES, &mm->flags))
705 return; 660 return 0;
706 661
707 set_bit(MMF_RECALC_UPROBES, &mm->flags); 662 set_bit(MMF_RECALC_UPROBES, &mm->flags);
708 set_orig_insn(&uprobe->arch, mm, vaddr); 663 return set_orig_insn(&uprobe->arch, mm, vaddr);
709} 664}
710 665
711/* 666/*
@@ -820,7 +775,7 @@ static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
820 struct mm_struct *mm = info->mm; 775 struct mm_struct *mm = info->mm;
821 struct vm_area_struct *vma; 776 struct vm_area_struct *vma;
822 777
823 if (err) 778 if (err && is_register)
824 goto free; 779 goto free;
825 780
826 down_write(&mm->mmap_sem); 781 down_write(&mm->mmap_sem);
@@ -836,7 +791,7 @@ static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
836 if (is_register) 791 if (is_register)
837 err = install_breakpoint(uprobe, mm, vma, info->vaddr); 792 err = install_breakpoint(uprobe, mm, vma, info->vaddr);
838 else 793 else
839 remove_breakpoint(uprobe, mm, info->vaddr); 794 err |= remove_breakpoint(uprobe, mm, info->vaddr);
840 795
841 unlock: 796 unlock:
842 up_write(&mm->mmap_sem); 797 up_write(&mm->mmap_sem);
@@ -893,13 +848,15 @@ int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *
893 mutex_lock(uprobes_hash(inode)); 848 mutex_lock(uprobes_hash(inode));
894 uprobe = alloc_uprobe(inode, offset); 849 uprobe = alloc_uprobe(inode, offset);
895 850
896 if (uprobe && !consumer_add(uprobe, uc)) { 851 if (!uprobe) {
852 ret = -ENOMEM;
853 } else if (!consumer_add(uprobe, uc)) {
897 ret = __uprobe_register(uprobe); 854 ret = __uprobe_register(uprobe);
898 if (ret) { 855 if (ret) {
899 uprobe->consumers = NULL; 856 uprobe->consumers = NULL;
900 __uprobe_unregister(uprobe); 857 __uprobe_unregister(uprobe);
901 } else { 858 } else {
902 uprobe->flags |= UPROBE_RUN_HANDLER; 859 set_bit(UPROBE_RUN_HANDLER, &uprobe->flags);
903 } 860 }
904 } 861 }
905 862
@@ -932,7 +889,7 @@ void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consume
932 if (consumer_del(uprobe, uc)) { 889 if (consumer_del(uprobe, uc)) {
933 if (!uprobe->consumers) { 890 if (!uprobe->consumers) {
934 __uprobe_unregister(uprobe); 891 __uprobe_unregister(uprobe);
935 uprobe->flags &= ~UPROBE_RUN_HANDLER; 892 clear_bit(UPROBE_RUN_HANDLER, &uprobe->flags);
936 } 893 }
937 } 894 }
938 895
@@ -1393,10 +1350,11 @@ bool uprobe_deny_signal(void)
1393 */ 1350 */
1394static bool can_skip_sstep(struct uprobe *uprobe, struct pt_regs *regs) 1351static bool can_skip_sstep(struct uprobe *uprobe, struct pt_regs *regs)
1395{ 1352{
1396 if (arch_uprobe_skip_sstep(&uprobe->arch, regs)) 1353 if (test_bit(UPROBE_SKIP_SSTEP, &uprobe->flags)) {
1397 return true; 1354 if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
1398 1355 return true;
1399 uprobe->flags &= ~UPROBE_SKIP_SSTEP; 1356 clear_bit(UPROBE_SKIP_SSTEP, &uprobe->flags);
1357 }
1400 return false; 1358 return false;
1401} 1359}
1402 1360
@@ -1419,6 +1377,30 @@ static void mmf_recalc_uprobes(struct mm_struct *mm)
1419 clear_bit(MMF_HAS_UPROBES, &mm->flags); 1377 clear_bit(MMF_HAS_UPROBES, &mm->flags);
1420} 1378}
1421 1379
1380static int is_swbp_at_addr(struct mm_struct *mm, unsigned long vaddr)
1381{
1382 struct page *page;
1383 uprobe_opcode_t opcode;
1384 int result;
1385
1386 pagefault_disable();
1387 result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
1388 sizeof(opcode));
1389 pagefault_enable();
1390
1391 if (likely(result == 0))
1392 goto out;
1393
1394 result = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &page, NULL);
1395 if (result < 0)
1396 return result;
1397
1398 copy_opcode(page, vaddr, &opcode);
1399 put_page(page);
1400 out:
1401 return is_swbp_insn(&opcode);
1402}
1403
1422static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp) 1404static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
1423{ 1405{
1424 struct mm_struct *mm = current->mm; 1406 struct mm_struct *mm = current->mm;
@@ -1489,38 +1471,41 @@ static void handle_swbp(struct pt_regs *regs)
1489 } 1471 }
1490 return; 1472 return;
1491 } 1473 }
1474 /*
1475 * TODO: move copy_insn/etc into _register and remove this hack.
1476 * After we hit the bp, _unregister + _register can install the
1477 * new and not-yet-analyzed uprobe at the same address, restart.
1478 */
1479 smp_rmb(); /* pairs with wmb() in install_breakpoint() */
1480 if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
1481 goto restart;
1492 1482
1493 utask = current->utask; 1483 utask = current->utask;
1494 if (!utask) { 1484 if (!utask) {
1495 utask = add_utask(); 1485 utask = add_utask();
1496 /* Cannot allocate; re-execute the instruction. */ 1486 /* Cannot allocate; re-execute the instruction. */
1497 if (!utask) 1487 if (!utask)
1498 goto cleanup_ret; 1488 goto restart;
1499 } 1489 }
1500 utask->active_uprobe = uprobe; 1490
1501 handler_chain(uprobe, regs); 1491 handler_chain(uprobe, regs);
1502 if (uprobe->flags & UPROBE_SKIP_SSTEP && can_skip_sstep(uprobe, regs)) 1492 if (can_skip_sstep(uprobe, regs))
1503 goto cleanup_ret; 1493 goto out;
1504 1494
1505 utask->state = UTASK_SSTEP;
1506 if (!pre_ssout(uprobe, regs, bp_vaddr)) { 1495 if (!pre_ssout(uprobe, regs, bp_vaddr)) {
1507 arch_uprobe_enable_step(&uprobe->arch); 1496 arch_uprobe_enable_step(&uprobe->arch);
1497 utask->active_uprobe = uprobe;
1498 utask->state = UTASK_SSTEP;
1508 return; 1499 return;
1509 } 1500 }
1510 1501
1511cleanup_ret: 1502restart:
1512 if (utask) { 1503 /*
1513 utask->active_uprobe = NULL; 1504 * cannot singlestep; cannot skip instruction;
1514 utask->state = UTASK_RUNNING; 1505 * re-execute the instruction.
1515 } 1506 */
1516 if (!(uprobe->flags & UPROBE_SKIP_SSTEP)) 1507 instruction_pointer_set(regs, bp_vaddr);
1517 1508out:
1518 /*
1519 * cannot singlestep; cannot skip instruction;
1520 * re-execute the instruction.
1521 */
1522 instruction_pointer_set(regs, bp_vaddr);
1523
1524 put_uprobe(uprobe); 1509 put_uprobe(uprobe);
1525} 1510}
1526 1511
@@ -1552,13 +1537,12 @@ static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
1552} 1537}
1553 1538
1554/* 1539/*
1555 * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag. (and on 1540 * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and
1556 * subsequent probe hits on the thread sets the state to UTASK_BP_HIT) and 1541 * allows the thread to return from interrupt. After that handle_swbp()
1557 * allows the thread to return from interrupt. 1542 * sets utask->active_uprobe.
1558 * 1543 *
1559 * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag and 1544 * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag
1560 * also sets the state to UTASK_SSTEP_ACK and allows the thread to return from 1545 * and allows the thread to return from interrupt.
1561 * interrupt.
1562 * 1546 *
1563 * While returning to userspace, thread notices the TIF_UPROBE flag and calls 1547 * While returning to userspace, thread notices the TIF_UPROBE flag and calls
1564 * uprobe_notify_resume(). 1548 * uprobe_notify_resume().
@@ -1567,11 +1551,13 @@ void uprobe_notify_resume(struct pt_regs *regs)
1567{ 1551{
1568 struct uprobe_task *utask; 1552 struct uprobe_task *utask;
1569 1553
1554 clear_thread_flag(TIF_UPROBE);
1555
1570 utask = current->utask; 1556 utask = current->utask;
1571 if (!utask || utask->state == UTASK_BP_HIT) 1557 if (utask && utask->active_uprobe)
1572 handle_swbp(regs);
1573 else
1574 handle_singlestep(utask, regs); 1558 handle_singlestep(utask, regs);
1559 else
1560 handle_swbp(regs);
1575} 1561}
1576 1562
1577/* 1563/*
@@ -1580,17 +1566,10 @@ void uprobe_notify_resume(struct pt_regs *regs)
1580 */ 1566 */
1581int uprobe_pre_sstep_notifier(struct pt_regs *regs) 1567int uprobe_pre_sstep_notifier(struct pt_regs *regs)
1582{ 1568{
1583 struct uprobe_task *utask;
1584
1585 if (!current->mm || !test_bit(MMF_HAS_UPROBES, &current->mm->flags)) 1569 if (!current->mm || !test_bit(MMF_HAS_UPROBES, &current->mm->flags))
1586 return 0; 1570 return 0;
1587 1571
1588 utask = current->utask;
1589 if (utask)
1590 utask->state = UTASK_BP_HIT;
1591
1592 set_thread_flag(TIF_UPROBE); 1572 set_thread_flag(TIF_UPROBE);
1593
1594 return 1; 1573 return 1;
1595} 1574}
1596 1575
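The verify_opcode() helper added above collapses the old is_swbp_at_addr() checks in set_swbp()/set_orig_insn() into one decision: return 0 when the page already holds the desired instruction, 1 when the write should proceed. A minimal user-space sketch of that decision table (not the kernel code; the 0xcc breakpoint byte is an x86-style assumption):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint8_t opcode_t;
#define SWBP_INSN ((opcode_t)0xcc)   /* assumption: x86-style INT3 */

static bool is_swbp(opcode_t op)
{
        return op == SWBP_INSN;
}

/* 0 = nothing to do (already in the desired state), 1 = go ahead and write */
static int verify(opcode_t old, opcode_t new)
{
        if (is_swbp(new))                     /* register path */
                return is_swbp(old) ? 0 : 1;
        return is_swbp(old) ? 1 : 0;          /* unregister path */
}

int main(void)
{
        printf("register   over plain insn: %d\n", verify(0x90, SWBP_INSN));
        printf("register   over swbp      : %d\n", verify(SWBP_INSN, SWBP_INSN));
        printf("unregister over swbp      : %d\n", verify(SWBP_INSN, 0x90));
        printf("unregister over plain insn: %d\n", verify(0x90, 0x90));
        return 0;
}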
diff --git a/kernel/module_signing.c b/kernel/module_signing.c
index d492a23df99c..ea1b1df5dbb0 100644
--- a/kernel/module_signing.c
+++ b/kernel/module_signing.c
@@ -192,7 +192,7 @@ int mod_verify_sig(const void *mod, unsigned long *_modlen)
192 size_t modlen = *_modlen, sig_len; 192 size_t modlen = *_modlen, sig_len;
193 int ret; 193 int ret;
194 194
195 pr_devel("==>%s(,%lu)\n", __func__, modlen); 195 pr_devel("==>%s(,%zu)\n", __func__, modlen);
196 196
197 if (modlen <= sizeof(ms)) 197 if (modlen <= sizeof(ms))
198 return -EBADMSG; 198 return -EBADMSG;
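The module_signing.c change is a one-character printf-format fix: modlen is a size_t, so the portable conversion is %zu rather than %lu. A tiny stand-alone illustration:

#include <stddef.h>
#include <stdio.h>

int main(void)
{
        size_t modlen = 123456;

        /* %zu is the conversion defined for size_t; %lu only happens to work
         * on ABIs where unsigned long and size_t share a width. */
        printf("==>mod_verify_sig(,%zu)\n", modlen);
        return 0;
}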
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index eb00be205811..7b07cc0dfb75 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -71,12 +71,22 @@ err_alloc:
71 return NULL; 71 return NULL;
72} 72}
73 73
74/* MAX_PID_NS_LEVEL is needed for limiting size of 'struct pid' */
75#define MAX_PID_NS_LEVEL 32
76
74static struct pid_namespace *create_pid_namespace(struct pid_namespace *parent_pid_ns) 77static struct pid_namespace *create_pid_namespace(struct pid_namespace *parent_pid_ns)
75{ 78{
76 struct pid_namespace *ns; 79 struct pid_namespace *ns;
77 unsigned int level = parent_pid_ns->level + 1; 80 unsigned int level = parent_pid_ns->level + 1;
78 int i, err = -ENOMEM; 81 int i;
82 int err;
83
84 if (level > MAX_PID_NS_LEVEL) {
85 err = -EINVAL;
86 goto out;
87 }
79 88
89 err = -ENOMEM;
80 ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL); 90 ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL);
81 if (ns == NULL) 91 if (ns == NULL)
82 goto out; 92 goto out;
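The pid_namespace.c hunk caps nesting depth before any allocation happens. A small sketch of the same early bail-out, assuming the MAX_PID_NS_LEVEL value from the hunk:

#include <errno.h>
#include <stdio.h>

#define MAX_PID_NS_LEVEL 32   /* same cap as the hunk above */

/* Refuse to build a namespace whose depth would exceed the cap, before
 * any allocation is attempted. */
static int create_level(unsigned int parent_level)
{
        unsigned int level = parent_level + 1;

        if (level > MAX_PID_NS_LEVEL)
                return -EINVAL;
        /* ... kmem_cache_zalloc() and the rest would follow here ... */
        return 0;
}

int main(void)
{
        printf("parent at level 31 -> %d\n", create_level(31));  /* 0 */
        printf("parent at level 32 -> %d\n", create_level(32));  /* -EINVAL */
        return 0;
}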
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index b32ed0e385a5..b979426d16c6 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1567,6 +1567,10 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
1567 1567
1568 put_online_cpus(); 1568 put_online_cpus();
1569 } else { 1569 } else {
1570 /* Make sure this CPU has been initialized */
1571 if (!cpumask_test_cpu(cpu_id, buffer->cpumask))
1572 goto out;
1573
1570 cpu_buffer = buffer->buffers[cpu_id]; 1574 cpu_buffer = buffer->buffers[cpu_id];
1571 1575
1572 if (nr_pages == cpu_buffer->nr_pages) 1576 if (nr_pages == cpu_buffer->nr_pages)
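The ring_buffer.c guard refuses to resize a per-CPU buffer that was never initialized. A user-space sketch of the idea, with a plain bitmask standing in for buffer->cpumask:

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

/* Only touch buffers[cpu] when its bit is set in the "initialized" mask. */
static bool cpu_initialized(unsigned long mask, int cpu)
{
        return cpu >= 0 && cpu < NR_CPUS && (mask & (1UL << cpu));
}

int main(void)
{
        unsigned long cpumask = 0x0b;   /* CPUs 0, 1 and 3 came up */

        printf("cpu 1: %s\n", cpu_initialized(cpumask, 1) ? "resize" : "skip");
        printf("cpu 2: %s\n", cpu_initialized(cpumask, 2) ? "resize" : "skip");
        return 0;
}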
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index d951daa0ca9a..042d221d33cc 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2982,7 +2982,7 @@ bool cancel_delayed_work(struct delayed_work *dwork)
2982 2982
2983 set_work_cpu_and_clear_pending(&dwork->work, work_cpu(&dwork->work)); 2983 set_work_cpu_and_clear_pending(&dwork->work, work_cpu(&dwork->work));
2984 local_irq_restore(flags); 2984 local_irq_restore(flags);
2985 return true; 2985 return ret;
2986} 2986}
2987EXPORT_SYMBOL(cancel_delayed_work); 2987EXPORT_SYMBOL(cancel_delayed_work);
2988 2988
diff --git a/lib/genalloc.c b/lib/genalloc.c
index ca208a92628c..54920433705a 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -178,7 +178,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
178 struct gen_pool_chunk *chunk; 178 struct gen_pool_chunk *chunk;
179 int nbits = size >> pool->min_alloc_order; 179 int nbits = size >> pool->min_alloc_order;
180 int nbytes = sizeof(struct gen_pool_chunk) + 180 int nbytes = sizeof(struct gen_pool_chunk) +
181 (nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE; 181 BITS_TO_LONGS(nbits) * sizeof(long);
182 182
183 chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid); 183 chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
184 if (unlikely(chunk == NULL)) 184 if (unlikely(chunk == NULL))
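The genalloc.c fix sizes the chunk bitmap in whole longs because the bitmap helpers read and write word-sized chunks; a byte-granular size can leave the last word partially outside the allocation. A quick comparison of the two computations:

#include <stdio.h>

#define BITS_PER_BYTE 8
#define BITS_PER_LONG (sizeof(long) * BITS_PER_BYTE)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
        int nbits = 9;
        size_t by_bytes = (nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
        size_t by_longs = BITS_TO_LONGS(nbits) * sizeof(long);

        /* Bitmap helpers access whole longs, so the byte-granular size lets
         * the last word run past the end of the allocation. */
        printf("byte-granular: %zu bytes, long-granular: %zu bytes\n",
               by_bytes, by_longs);
        return 0;
}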
diff --git a/mm/memblock.c b/mm/memblock.c
index 931eef145af5..625905523c2a 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -930,6 +930,30 @@ int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t si
930 return memblock_overlaps_region(&memblock.reserved, base, size) >= 0; 930 return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
931} 931}
932 932
933void __init_memblock memblock_trim_memory(phys_addr_t align)
934{
935 int i;
936 phys_addr_t start, end, orig_start, orig_end;
937 struct memblock_type *mem = &memblock.memory;
938
939 for (i = 0; i < mem->cnt; i++) {
940 orig_start = mem->regions[i].base;
941 orig_end = mem->regions[i].base + mem->regions[i].size;
942 start = round_up(orig_start, align);
943 end = round_down(orig_end, align);
944
945 if (start == orig_start && end == orig_end)
946 continue;
947
948 if (start < end) {
949 mem->regions[i].base = start;
950 mem->regions[i].size = end - start;
951 } else {
952 memblock_remove_region(mem, i);
953 i--;
954 }
955 }
956}
933 957
934void __init_memblock memblock_set_current_limit(phys_addr_t limit) 958void __init_memblock memblock_set_current_limit(phys_addr_t limit)
935{ 959{
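memblock_trim_memory() rounds each region inward to the requested alignment and drops regions that collapse to nothing. A user-space sketch of that rounding, assuming a power-of-two alignment as the kernel helpers do:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;

/* align must be a power of two, as with the kernel's round_up/round_down */
static phys_addr_t round_down_p2(phys_addr_t x, phys_addr_t a)
{
        return x & ~(a - 1);
}

static phys_addr_t round_up_p2(phys_addr_t x, phys_addr_t a)
{
        return (x + a - 1) & ~(a - 1);
}

int main(void)
{
        phys_addr_t base = 0x1234, size = 0x5000, align = 0x1000;
        phys_addr_t start = round_up_p2(base, align);
        phys_addr_t end = round_down_p2(base + size, align);

        if (start < end)
                printf("trimmed region: [0x%llx, 0x%llx)\n",
                       (unsigned long long)start, (unsigned long long)end);
        else
                printf("region smaller than one aligned block: removed\n");
        return 0;
}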
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 479a1e751a73..8a5ac8c686b0 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -196,28 +196,28 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
196 BUG_ON(atomic_read(&mm->mm_users) <= 0); 196 BUG_ON(atomic_read(&mm->mm_users) <= 0);
197 197
198 /* 198 /*
199 * Verify that mmu_notifier_init() already run and the global srcu is 199 * Verify that mmu_notifier_init() already run and the global srcu is
200 * initialized. 200 * initialized.
201 */ 201 */
202 BUG_ON(!srcu.per_cpu_ref); 202 BUG_ON(!srcu.per_cpu_ref);
203 203
204 ret = -ENOMEM;
205 mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
206 if (unlikely(!mmu_notifier_mm))
207 goto out;
208
204 if (take_mmap_sem) 209 if (take_mmap_sem)
205 down_write(&mm->mmap_sem); 210 down_write(&mm->mmap_sem);
206 ret = mm_take_all_locks(mm); 211 ret = mm_take_all_locks(mm);
207 if (unlikely(ret)) 212 if (unlikely(ret))
208 goto out; 213 goto out_clean;
209 214
210 if (!mm_has_notifiers(mm)) { 215 if (!mm_has_notifiers(mm)) {
211 mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm),
212 GFP_KERNEL);
213 if (unlikely(!mmu_notifier_mm)) {
214 ret = -ENOMEM;
215 goto out_of_mem;
216 }
217 INIT_HLIST_HEAD(&mmu_notifier_mm->list); 216 INIT_HLIST_HEAD(&mmu_notifier_mm->list);
218 spin_lock_init(&mmu_notifier_mm->lock); 217 spin_lock_init(&mmu_notifier_mm->lock);
219 218
220 mm->mmu_notifier_mm = mmu_notifier_mm; 219 mm->mmu_notifier_mm = mmu_notifier_mm;
220 mmu_notifier_mm = NULL;
221 } 221 }
222 atomic_inc(&mm->mm_count); 222 atomic_inc(&mm->mm_count);
223 223
@@ -233,12 +233,12 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
233 hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list); 233 hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
234 spin_unlock(&mm->mmu_notifier_mm->lock); 234 spin_unlock(&mm->mmu_notifier_mm->lock);
235 235
236out_of_mem:
237 mm_drop_all_locks(mm); 236 mm_drop_all_locks(mm);
238out: 237out_clean:
239 if (take_mmap_sem) 238 if (take_mmap_sem)
240 up_write(&mm->mmap_sem); 239 up_write(&mm->mmap_sem);
241 240 kfree(mmu_notifier_mm);
241out:
242 BUG_ON(atomic_read(&mm->mm_users) <= 0); 242 BUG_ON(atomic_read(&mm->mm_users) <= 0);
243 return ret; 243 return ret;
244} 244}
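The mmu_notifier.c rework moves the GFP_KERNEL allocation ahead of mm_take_all_locks(), so the allocation never happens while the heavy locks are held; if the structure turns out to be unneeded, the unconditional kfree() on the exit path reclaims it. A small pthread-based sketch of the same allocate-first pattern (build with -pthread):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
static int *shared_state;          /* stands in for mm->mmu_notifier_mm */

static int do_register(void)
{
        /* allocate before taking the lock, so the lock is never held
         * across a potentially failing allocation */
        int *fresh = malloc(sizeof(*fresh));

        if (!fresh)
                return -ENOMEM;

        pthread_mutex_lock(&big_lock);
        if (!shared_state) {
                *fresh = 1;
                shared_state = fresh;
                fresh = NULL;      /* ownership transferred */
        }
        pthread_mutex_unlock(&big_lock);

        free(fresh);               /* no-op when it was installed */
        return 0;
}

int main(void)
{
        printf("first  register: %d\n", do_register());
        printf("second register: %d\n", do_register());
        free(shared_state);
        return 0;
}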
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index bb90971182bd..5b74de6702e0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1809,10 +1809,10 @@ static void __paginginit init_zone_allows_reclaim(int nid)
1809 int i; 1809 int i;
1810 1810
1811 for_each_online_node(i) 1811 for_each_online_node(i)
1812 if (node_distance(nid, i) <= RECLAIM_DISTANCE) { 1812 if (node_distance(nid, i) <= RECLAIM_DISTANCE)
1813 node_set(i, NODE_DATA(nid)->reclaim_nodes); 1813 node_set(i, NODE_DATA(nid)->reclaim_nodes);
1814 else
1814 zone_reclaim_mode = 1; 1815 zone_reclaim_mode = 1;
1815 }
1816} 1816}
1817 1817
1818#else /* CONFIG_NUMA */ 1818#else /* CONFIG_NUMA */
@@ -5825,7 +5825,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
5825 ret = start_isolate_page_range(pfn_max_align_down(start), 5825 ret = start_isolate_page_range(pfn_max_align_down(start),
5826 pfn_max_align_up(end), migratetype); 5826 pfn_max_align_up(end), migratetype);
5827 if (ret) 5827 if (ret)
5828 goto done; 5828 return ret;
5829 5829
5830 ret = __alloc_contig_migrate_range(&cc, start, end); 5830 ret = __alloc_contig_migrate_range(&cc, start, end);
5831 if (ret) 5831 if (ret)
diff --git a/mm/rmap.c b/mm/rmap.c
index 7df7984d476c..2ee1ef0f317b 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -56,6 +56,7 @@
56#include <linux/mmu_notifier.h> 56#include <linux/mmu_notifier.h>
57#include <linux/migrate.h> 57#include <linux/migrate.h>
58#include <linux/hugetlb.h> 58#include <linux/hugetlb.h>
59#include <linux/backing-dev.h>
59 60
60#include <asm/tlbflush.h> 61#include <asm/tlbflush.h>
61 62
@@ -926,11 +927,8 @@ int page_mkclean(struct page *page)
926 927
927 if (page_mapped(page)) { 928 if (page_mapped(page)) {
928 struct address_space *mapping = page_mapping(page); 929 struct address_space *mapping = page_mapping(page);
929 if (mapping) { 930 if (mapping)
930 ret = page_mkclean_file(mapping, page); 931 ret = page_mkclean_file(mapping, page);
931 if (page_test_and_clear_dirty(page_to_pfn(page), 1))
932 ret = 1;
933 }
934 } 932 }
935 933
936 return ret; 934 return ret;
@@ -1116,6 +1114,7 @@ void page_add_file_rmap(struct page *page)
1116 */ 1114 */
1117void page_remove_rmap(struct page *page) 1115void page_remove_rmap(struct page *page)
1118{ 1116{
1117 struct address_space *mapping = page_mapping(page);
1119 bool anon = PageAnon(page); 1118 bool anon = PageAnon(page);
1120 bool locked; 1119 bool locked;
1121 unsigned long flags; 1120 unsigned long flags;
@@ -1138,8 +1137,19 @@ void page_remove_rmap(struct page *page)
1138 * this if the page is anon, so about to be freed; but perhaps 1137 * this if the page is anon, so about to be freed; but perhaps
1139 * not if it's in swapcache - there might be another pte slot 1138 * not if it's in swapcache - there might be another pte slot
1140 * containing the swap entry, but page not yet written to swap. 1139 * containing the swap entry, but page not yet written to swap.
1140 *
1141 * And we can skip it on file pages, so long as the filesystem
1142 * participates in dirty tracking; but need to catch shm and tmpfs
1143 * and ramfs pages which have been modified since creation by read
1144 * fault.
1145 *
1146 * Note that mapping must be decided above, before decrementing
1147 * mapcount (which luckily provides a barrier): once page is unmapped,
1148 * it could be truncated and page->mapping reset to NULL at any moment.
1149 * Note also that we are relying on page_mapping(page) to set mapping
1150 * to &swapper_space when PageSwapCache(page).
1141 */ 1151 */
1142 if ((!anon || PageSwapCache(page)) && 1152 if (mapping && !mapping_cap_account_dirty(mapping) &&
1143 page_test_and_clear_dirty(page_to_pfn(page), 1)) 1153 page_test_and_clear_dirty(page_to_pfn(page), 1))
1144 set_page_dirty(page); 1154 set_page_dirty(page);
1145 /* 1155 /*
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index aaaadfbe36e9..75853cabf4c9 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -254,7 +254,6 @@ struct sock_xprt {
254 void (*old_data_ready)(struct sock *, int); 254 void (*old_data_ready)(struct sock *, int);
255 void (*old_state_change)(struct sock *); 255 void (*old_state_change)(struct sock *);
256 void (*old_write_space)(struct sock *); 256 void (*old_write_space)(struct sock *);
257 void (*old_error_report)(struct sock *);
258}; 257};
259 258
260/* 259/*
@@ -737,10 +736,10 @@ static int xs_tcp_send_request(struct rpc_task *task)
737 dprintk("RPC: sendmsg returned unrecognized error %d\n", 736 dprintk("RPC: sendmsg returned unrecognized error %d\n",
738 -status); 737 -status);
739 case -ECONNRESET: 738 case -ECONNRESET:
740 case -EPIPE:
741 xs_tcp_shutdown(xprt); 739 xs_tcp_shutdown(xprt);
742 case -ECONNREFUSED: 740 case -ECONNREFUSED:
743 case -ENOTCONN: 741 case -ENOTCONN:
742 case -EPIPE:
744 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); 743 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
745 } 744 }
746 745
@@ -781,7 +780,6 @@ static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
781 transport->old_data_ready = sk->sk_data_ready; 780 transport->old_data_ready = sk->sk_data_ready;
782 transport->old_state_change = sk->sk_state_change; 781 transport->old_state_change = sk->sk_state_change;
783 transport->old_write_space = sk->sk_write_space; 782 transport->old_write_space = sk->sk_write_space;
784 transport->old_error_report = sk->sk_error_report;
785} 783}
786 784
787static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk) 785static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
@@ -789,7 +787,6 @@ static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *s
789 sk->sk_data_ready = transport->old_data_ready; 787 sk->sk_data_ready = transport->old_data_ready;
790 sk->sk_state_change = transport->old_state_change; 788 sk->sk_state_change = transport->old_state_change;
791 sk->sk_write_space = transport->old_write_space; 789 sk->sk_write_space = transport->old_write_space;
792 sk->sk_error_report = transport->old_error_report;
793} 790}
794 791
795static void xs_reset_transport(struct sock_xprt *transport) 792static void xs_reset_transport(struct sock_xprt *transport)
@@ -1453,7 +1450,7 @@ static void xs_tcp_cancel_linger_timeout(struct rpc_xprt *xprt)
1453 xprt_clear_connecting(xprt); 1450 xprt_clear_connecting(xprt);
1454} 1451}
1455 1452
1456static void xs_sock_mark_closed(struct rpc_xprt *xprt) 1453static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
1457{ 1454{
1458 smp_mb__before_clear_bit(); 1455 smp_mb__before_clear_bit();
1459 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); 1456 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
@@ -1461,6 +1458,11 @@ static void xs_sock_mark_closed(struct rpc_xprt *xprt)
1461 clear_bit(XPRT_CLOSE_WAIT, &xprt->state); 1458 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
1462 clear_bit(XPRT_CLOSING, &xprt->state); 1459 clear_bit(XPRT_CLOSING, &xprt->state);
1463 smp_mb__after_clear_bit(); 1460 smp_mb__after_clear_bit();
1461}
1462
1463static void xs_sock_mark_closed(struct rpc_xprt *xprt)
1464{
1465 xs_sock_reset_connection_flags(xprt);
1464 /* Mark transport as closed and wake up all pending tasks */ 1466 /* Mark transport as closed and wake up all pending tasks */
1465 xprt_disconnect_done(xprt); 1467 xprt_disconnect_done(xprt);
1466} 1468}
@@ -1516,6 +1518,7 @@ static void xs_tcp_state_change(struct sock *sk)
1516 case TCP_CLOSE_WAIT: 1518 case TCP_CLOSE_WAIT:
1517 /* The server initiated a shutdown of the socket */ 1519 /* The server initiated a shutdown of the socket */
1518 xprt->connect_cookie++; 1520 xprt->connect_cookie++;
1521 clear_bit(XPRT_CONNECTED, &xprt->state);
1519 xs_tcp_force_close(xprt); 1522 xs_tcp_force_close(xprt);
1520 case TCP_CLOSING: 1523 case TCP_CLOSING:
1521 /* 1524 /*
@@ -1540,25 +1543,6 @@ static void xs_tcp_state_change(struct sock *sk)
1540 read_unlock_bh(&sk->sk_callback_lock); 1543 read_unlock_bh(&sk->sk_callback_lock);
1541} 1544}
1542 1545
1543/**
1544 * xs_error_report - callback mainly for catching socket errors
1545 * @sk: socket
1546 */
1547static void xs_error_report(struct sock *sk)
1548{
1549 struct rpc_xprt *xprt;
1550
1551 read_lock_bh(&sk->sk_callback_lock);
1552 if (!(xprt = xprt_from_sock(sk)))
1553 goto out;
1554 dprintk("RPC: %s client %p...\n"
1555 "RPC: error %d\n",
1556 __func__, xprt, sk->sk_err);
1557 xprt_wake_pending_tasks(xprt, -EAGAIN);
1558out:
1559 read_unlock_bh(&sk->sk_callback_lock);
1560}
1561
1562static void xs_write_space(struct sock *sk) 1546static void xs_write_space(struct sock *sk)
1563{ 1547{
1564 struct socket *sock; 1548 struct socket *sock;
@@ -1858,7 +1842,6 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt,
1858 sk->sk_user_data = xprt; 1842 sk->sk_user_data = xprt;
1859 sk->sk_data_ready = xs_local_data_ready; 1843 sk->sk_data_ready = xs_local_data_ready;
1860 sk->sk_write_space = xs_udp_write_space; 1844 sk->sk_write_space = xs_udp_write_space;
1861 sk->sk_error_report = xs_error_report;
1862 sk->sk_allocation = GFP_ATOMIC; 1845 sk->sk_allocation = GFP_ATOMIC;
1863 1846
1864 xprt_clear_connected(xprt); 1847 xprt_clear_connected(xprt);
@@ -1983,7 +1966,6 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
1983 sk->sk_user_data = xprt; 1966 sk->sk_user_data = xprt;
1984 sk->sk_data_ready = xs_udp_data_ready; 1967 sk->sk_data_ready = xs_udp_data_ready;
1985 sk->sk_write_space = xs_udp_write_space; 1968 sk->sk_write_space = xs_udp_write_space;
1986 sk->sk_error_report = xs_error_report;
1987 sk->sk_no_check = UDP_CSUM_NORCV; 1969 sk->sk_no_check = UDP_CSUM_NORCV;
1988 sk->sk_allocation = GFP_ATOMIC; 1970 sk->sk_allocation = GFP_ATOMIC;
1989 1971
@@ -2050,10 +2032,8 @@ static void xs_abort_connection(struct sock_xprt *transport)
2050 any.sa_family = AF_UNSPEC; 2032 any.sa_family = AF_UNSPEC;
2051 result = kernel_connect(transport->sock, &any, sizeof(any), 0); 2033 result = kernel_connect(transport->sock, &any, sizeof(any), 0);
2052 if (!result) 2034 if (!result)
2053 xs_sock_mark_closed(&transport->xprt); 2035 xs_sock_reset_connection_flags(&transport->xprt);
2054 else 2036 dprintk("RPC: AF_UNSPEC connect return code %d\n", result);
2055 dprintk("RPC: AF_UNSPEC connect return code %d\n",
2056 result);
2057} 2037}
2058 2038
2059static void xs_tcp_reuse_connection(struct sock_xprt *transport) 2039static void xs_tcp_reuse_connection(struct sock_xprt *transport)
@@ -2098,7 +2078,6 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2098 sk->sk_data_ready = xs_tcp_data_ready; 2078 sk->sk_data_ready = xs_tcp_data_ready;
2099 sk->sk_state_change = xs_tcp_state_change; 2079 sk->sk_state_change = xs_tcp_state_change;
2100 sk->sk_write_space = xs_tcp_write_space; 2080 sk->sk_write_space = xs_tcp_write_space;
2101 sk->sk_error_report = xs_error_report;
2102 sk->sk_allocation = GFP_ATOMIC; 2081 sk->sk_allocation = GFP_ATOMIC;
2103 2082
2104 /* socket options */ 2083 /* socket options */
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
index cf5fd220309b..813200384d97 100644
--- a/security/apparmor/policy.c
+++ b/security/apparmor/policy.c
@@ -724,6 +724,8 @@ fail:
724 */ 724 */
725static void free_profile(struct aa_profile *profile) 725static void free_profile(struct aa_profile *profile)
726{ 726{
727 struct aa_profile *p;
728
727 AA_DEBUG("%s(%p)\n", __func__, profile); 729 AA_DEBUG("%s(%p)\n", __func__, profile);
728 730
729 if (!profile) 731 if (!profile)
@@ -751,7 +753,27 @@ static void free_profile(struct aa_profile *profile)
751 aa_put_dfa(profile->xmatch); 753 aa_put_dfa(profile->xmatch);
752 aa_put_dfa(profile->policy.dfa); 754 aa_put_dfa(profile->policy.dfa);
753 755
754 aa_put_profile(profile->replacedby); 756 /* put the profile reference for replacedby, but not via
757 * put_profile(kref_put).
758 * replacedby can form a long chain that can result in cascading
759 * frees that blow the stack because kref_put makes a nested fn
760 * call (it looks like recursion, with free_profile calling
761 * free_profile) for each profile in the chain lp#1056078.
762 */
763 for (p = profile->replacedby; p; ) {
764 if (atomic_dec_and_test(&p->base.count.refcount)) {
765 /* no more refs on p, grab its replacedby */
766 struct aa_profile *next = p->replacedby;
767 /* break the chain */
768 p->replacedby = NULL;
769 /* now free p, chain is broken */
770 free_profile(p);
771
772 /* follow up with next profile in the chain */
773 p = next;
774 } else
775 break;
776 }
755 777
756 kzfree(profile); 778 kzfree(profile);
757} 779}
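The free_profile() change walks the replacedby chain iteratively so that dropping the last reference on a long chain cannot recurse through kref_put() and overflow the stack (lp#1056078). A user-space sketch of the same loop:

#include <stdio.h>
#include <stdlib.h>

struct profile {
        int refcount;                  /* one ref held by the previous link */
        struct profile *replacedby;
};

/* Drop a chain of last references with a loop instead of recursion. */
static void put_chain(struct profile *p)
{
        while (p && --p->refcount == 0) {
                struct profile *next = p->replacedby;

                p->replacedby = NULL;  /* break the chain */
                free(p);
                p = next;              /* follow up with the next link */
        }
}

int main(void)
{
        struct profile *head = NULL;

        /* build a long chain; each link holds the only ref on the next */
        for (int i = 0; i < 100000; i++) {
                struct profile *p = malloc(sizeof(*p));

                if (!p)
                        return 1;
                p->refcount = 1;
                p->replacedby = head;
                head = p;
        }
        put_chain(head);               /* releases everything, no recursion */
        puts("chain released");
        return 0;
}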
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index 44dfc415a379..842c254396db 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -42,7 +42,10 @@ struct dev_exception_item {
42struct dev_cgroup { 42struct dev_cgroup {
43 struct cgroup_subsys_state css; 43 struct cgroup_subsys_state css;
44 struct list_head exceptions; 44 struct list_head exceptions;
45 bool deny_all; 45 enum {
46 DEVCG_DEFAULT_ALLOW,
47 DEVCG_DEFAULT_DENY,
48 } behavior;
46}; 49};
47 50
48static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s) 51static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
@@ -182,13 +185,13 @@ static struct cgroup_subsys_state *devcgroup_create(struct cgroup *cgroup)
182 parent_cgroup = cgroup->parent; 185 parent_cgroup = cgroup->parent;
183 186
184 if (parent_cgroup == NULL) 187 if (parent_cgroup == NULL)
185 dev_cgroup->deny_all = false; 188 dev_cgroup->behavior = DEVCG_DEFAULT_ALLOW;
186 else { 189 else {
187 parent_dev_cgroup = cgroup_to_devcgroup(parent_cgroup); 190 parent_dev_cgroup = cgroup_to_devcgroup(parent_cgroup);
188 mutex_lock(&devcgroup_mutex); 191 mutex_lock(&devcgroup_mutex);
189 ret = dev_exceptions_copy(&dev_cgroup->exceptions, 192 ret = dev_exceptions_copy(&dev_cgroup->exceptions,
190 &parent_dev_cgroup->exceptions); 193 &parent_dev_cgroup->exceptions);
191 dev_cgroup->deny_all = parent_dev_cgroup->deny_all; 194 dev_cgroup->behavior = parent_dev_cgroup->behavior;
192 mutex_unlock(&devcgroup_mutex); 195 mutex_unlock(&devcgroup_mutex);
193 if (ret) { 196 if (ret) {
194 kfree(dev_cgroup); 197 kfree(dev_cgroup);
@@ -260,7 +263,7 @@ static int devcgroup_seq_read(struct cgroup *cgroup, struct cftype *cft,
260 * - List the exceptions in case the default policy is to deny 263 * - List the exceptions in case the default policy is to deny
261 * This way, the file remains as a "whitelist of devices" 264 * This way, the file remains as a "whitelist of devices"
262 */ 265 */
263 if (devcgroup->deny_all == false) { 266 if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
264 set_access(acc, ACC_MASK); 267 set_access(acc, ACC_MASK);
265 set_majmin(maj, ~0); 268 set_majmin(maj, ~0);
266 set_majmin(min, ~0); 269 set_majmin(min, ~0);
@@ -314,12 +317,12 @@ static int may_access(struct dev_cgroup *dev_cgroup,
314 * In two cases we'll consider this new exception valid: 317 * In two cases we'll consider this new exception valid:
315 * - the dev cgroup has its default policy to allow + exception list: 318 * - the dev cgroup has its default policy to allow + exception list:
316 * the new exception should *not* match any of the exceptions 319 * the new exception should *not* match any of the exceptions
317 * (!deny_all, !match) 320 * (behavior == DEVCG_DEFAULT_ALLOW, !match)
318 * - the dev cgroup has its default policy to deny + exception list: 321 * - the dev cgroup has its default policy to deny + exception list:
319 * the new exception *should* match the exceptions 322 * the new exception *should* match the exceptions
320 * (deny_all, match) 323 * (behavior == DEVCG_DEFAULT_DENY, match)
321 */ 324 */
322 if (dev_cgroup->deny_all == match) 325 if ((dev_cgroup->behavior == DEVCG_DEFAULT_DENY) == match)
323 return 1; 326 return 1;
324 return 0; 327 return 0;
325} 328}
@@ -341,6 +344,17 @@ static int parent_has_perm(struct dev_cgroup *childcg,
341 return may_access(parent, ex); 344 return may_access(parent, ex);
342} 345}
343 346
347/**
348 * may_allow_all - checks if it's possible to change the behavior to
349 * allow based on parent's rules.
350 * @parent: device cgroup's parent
351 * returns: != 0 in case it's allowed, 0 otherwise
352 */
353static inline int may_allow_all(struct dev_cgroup *parent)
354{
355 return parent->behavior == DEVCG_DEFAULT_ALLOW;
356}
357
344/* 358/*
345 * Modify the exception list using allow/deny rules. 359 * Modify the exception list using allow/deny rules.
346 * CAP_SYS_ADMIN is needed for this. It's at least separate from CAP_MKNOD 360 * CAP_SYS_ADMIN is needed for this. It's at least separate from CAP_MKNOD
@@ -358,9 +372,11 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
358 int filetype, const char *buffer) 372 int filetype, const char *buffer)
359{ 373{
360 const char *b; 374 const char *b;
361 char *endp; 375 char temp[12]; /* 11 + 1 characters needed for a u32 */
362 int count; 376 int count, rc;
363 struct dev_exception_item ex; 377 struct dev_exception_item ex;
378 struct cgroup *p = devcgroup->css.cgroup;
379 struct dev_cgroup *parent = cgroup_to_devcgroup(p->parent);
364 380
365 if (!capable(CAP_SYS_ADMIN)) 381 if (!capable(CAP_SYS_ADMIN))
366 return -EPERM; 382 return -EPERM;
@@ -372,14 +388,18 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
372 case 'a': 388 case 'a':
373 switch (filetype) { 389 switch (filetype) {
374 case DEVCG_ALLOW: 390 case DEVCG_ALLOW:
375 if (!parent_has_perm(devcgroup, &ex)) 391 if (!may_allow_all(parent))
376 return -EPERM; 392 return -EPERM;
377 dev_exception_clean(devcgroup); 393 dev_exception_clean(devcgroup);
378 devcgroup->deny_all = false; 394 rc = dev_exceptions_copy(&devcgroup->exceptions,
395 &parent->exceptions);
396 if (rc)
397 return rc;
398 devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
379 break; 399 break;
380 case DEVCG_DENY: 400 case DEVCG_DENY:
381 dev_exception_clean(devcgroup); 401 dev_exception_clean(devcgroup);
382 devcgroup->deny_all = true; 402 devcgroup->behavior = DEVCG_DEFAULT_DENY;
383 break; 403 break;
384 default: 404 default:
385 return -EINVAL; 405 return -EINVAL;
@@ -402,8 +422,16 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
402 ex.major = ~0; 422 ex.major = ~0;
403 b++; 423 b++;
404 } else if (isdigit(*b)) { 424 } else if (isdigit(*b)) {
405 ex.major = simple_strtoul(b, &endp, 10); 425 memset(temp, 0, sizeof(temp));
406 b = endp; 426 for (count = 0; count < sizeof(temp) - 1; count++) {
427 temp[count] = *b;
428 b++;
429 if (!isdigit(*b))
430 break;
431 }
432 rc = kstrtou32(temp, 10, &ex.major);
433 if (rc)
434 return -EINVAL;
407 } else { 435 } else {
408 return -EINVAL; 436 return -EINVAL;
409 } 437 }
@@ -416,8 +444,16 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
416 ex.minor = ~0; 444 ex.minor = ~0;
417 b++; 445 b++;
418 } else if (isdigit(*b)) { 446 } else if (isdigit(*b)) {
419 ex.minor = simple_strtoul(b, &endp, 10); 447 memset(temp, 0, sizeof(temp));
420 b = endp; 448 for (count = 0; count < sizeof(temp) - 1; count++) {
449 temp[count] = *b;
450 b++;
451 if (!isdigit(*b))
452 break;
453 }
454 rc = kstrtou32(temp, 10, &ex.minor);
455 if (rc)
456 return -EINVAL;
421 } else { 457 } else {
422 return -EINVAL; 458 return -EINVAL;
423 } 459 }
@@ -452,7 +488,7 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
452 * a matching exception instead. And be silent about it: we 488
453 * don't want to break compatibility 489 * don't want to break compatibility
454 */ 490 */
455 if (devcgroup->deny_all == false) { 491 if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
456 dev_exception_rm(devcgroup, &ex); 492 dev_exception_rm(devcgroup, &ex);
457 return 0; 493 return 0;
458 } 494 }
@@ -463,7 +499,7 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
463 * a matching exception instead. And be silent about it: we 499
464 * don't want to break compatibility 500 * don't want to break compatibility
465 */ 501 */
466 if (devcgroup->deny_all == true) { 502 if (devcgroup->behavior == DEVCG_DEFAULT_DENY) {
467 dev_exception_rm(devcgroup, &ex); 503 dev_exception_rm(devcgroup, &ex);
468 return 0; 504 return 0;
469 } 505 }
@@ -533,10 +569,10 @@ struct cgroup_subsys devices_subsys = {
533 * 569 *
534 * returns 0 on success, -EPERM case the operation is not permitted 570 * returns 0 on success, -EPERM case the operation is not permitted
535 */ 571 */
536static int __devcgroup_check_permission(struct dev_cgroup *dev_cgroup, 572static int __devcgroup_check_permission(short type, u32 major, u32 minor,
537 short type, u32 major, u32 minor,
538 short access) 573 short access)
539{ 574{
575 struct dev_cgroup *dev_cgroup;
540 struct dev_exception_item ex; 576 struct dev_exception_item ex;
541 int rc; 577 int rc;
542 578
@@ -547,6 +583,7 @@ static int __devcgroup_check_permission(struct dev_cgroup *dev_cgroup,
547 ex.access = access; 583 ex.access = access;
548 584
549 rcu_read_lock(); 585 rcu_read_lock();
586 dev_cgroup = task_devcgroup(current);
550 rc = may_access(dev_cgroup, &ex); 587 rc = may_access(dev_cgroup, &ex);
551 rcu_read_unlock(); 588 rcu_read_unlock();
552 589
@@ -558,7 +595,6 @@ static int __devcgroup_check_permission(struct dev_cgroup *dev_cgroup,
558 595
559int __devcgroup_inode_permission(struct inode *inode, int mask) 596int __devcgroup_inode_permission(struct inode *inode, int mask)
560{ 597{
561 struct dev_cgroup *dev_cgroup = task_devcgroup(current);
562 short type, access = 0; 598 short type, access = 0;
563 599
564 if (S_ISBLK(inode->i_mode)) 600 if (S_ISBLK(inode->i_mode))
@@ -570,13 +606,12 @@ int __devcgroup_inode_permission(struct inode *inode, int mask)
570 if (mask & MAY_READ) 606 if (mask & MAY_READ)
571 access |= ACC_READ; 607 access |= ACC_READ;
572 608
573 return __devcgroup_check_permission(dev_cgroup, type, imajor(inode), 609 return __devcgroup_check_permission(type, imajor(inode), iminor(inode),
574 iminor(inode), access); 610 access);
575} 611}
576 612
577int devcgroup_inode_mknod(int mode, dev_t dev) 613int devcgroup_inode_mknod(int mode, dev_t dev)
578{ 614{
579 struct dev_cgroup *dev_cgroup = task_devcgroup(current);
580 short type; 615 short type;
581 616
582 if (!S_ISBLK(mode) && !S_ISCHR(mode)) 617 if (!S_ISBLK(mode) && !S_ISCHR(mode))
@@ -587,7 +622,7 @@ int devcgroup_inode_mknod(int mode, dev_t dev)
587 else 622 else
588 type = DEV_CHAR; 623 type = DEV_CHAR;
589 624
590 return __devcgroup_check_permission(dev_cgroup, type, MAJOR(dev), 625 return __devcgroup_check_permission(type, MAJOR(dev), MINOR(dev),
591 MINOR(dev), ACC_MKNOD); 626 ACC_MKNOD);
592 627
593} 628}
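The device_cgroup.c parser now copies at most a u32's worth of digits into a NUL-terminated scratch buffer and converts it with a checked helper instead of trusting simple_strtoul(). A user-space sketch of that bounded parse (the rule string is hypothetical, and strtoul stands in for kstrtou32):

#include <ctype.h>
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Copy at most 11 digits into a NUL-terminated scratch buffer, then convert
 * with a range check, instead of letting an unbounded conversion run on. */
static int parse_u32(const char **cursor, uint32_t *out)
{
        char temp[12] = { 0 };         /* 11 + 1 characters needed for a u32 */
        const char *b = *cursor;
        char *end;
        unsigned long val;
        size_t count = 0;

        if (!isdigit((unsigned char)*b))
                return -EINVAL;
        while (count < sizeof(temp) - 1 && isdigit((unsigned char)*b))
                temp[count++] = *b++;

        errno = 0;
        val = strtoul(temp, &end, 10);
        if (errno || *end != '\0' || val > UINT32_MAX)
                return -EINVAL;

        *cursor = b;
        *out = (uint32_t)val;
        return 0;
}

int main(void)
{
        const char *rule = "254:7 rwm";   /* hypothetical major:minor fragment */
        uint32_t major;

        if (parse_u32(&rule, &major) == 0)
                printf("major=%u, rest=\"%s\"\n", major, rule);
        return 0;
}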
diff --git a/sound/isa/opti9xx/miro.c b/sound/isa/opti9xx/miro.c
index 3d1afb612b35..4a7ff4e8985b 100644
--- a/sound/isa/opti9xx/miro.c
+++ b/sound/isa/opti9xx/miro.c
@@ -1286,7 +1286,6 @@ static int __devinit snd_miro_probe(struct snd_card *card)
1286 1286
1287 error = snd_card_miro_aci_detect(card, miro); 1287 error = snd_card_miro_aci_detect(card, miro);
1288 if (error < 0) { 1288 if (error < 0) {
1289 snd_card_free(card);
1290 snd_printk(KERN_ERR "unable to detect aci chip\n"); 1289 snd_printk(KERN_ERR "unable to detect aci chip\n");
1291 return -ENODEV; 1290 return -ENODEV;
1292 } 1291 }
diff --git a/sound/pci/als300.c b/sound/pci/als300.c
index 00f157a2cf64..5af3cb6b0c18 100644
--- a/sound/pci/als300.c
+++ b/sound/pci/als300.c
@@ -394,6 +394,8 @@ static int snd_als300_playback_open(struct snd_pcm_substream *substream)
394 struct snd_als300_substream_data *data = kzalloc(sizeof(*data), 394 struct snd_als300_substream_data *data = kzalloc(sizeof(*data),
395 GFP_KERNEL); 395 GFP_KERNEL);
396 396
397 if (!data)
398 return -ENOMEM;
397 snd_als300_dbgcallenter(); 399 snd_als300_dbgcallenter();
398 chip->playback_substream = substream; 400 chip->playback_substream = substream;
399 runtime->hw = snd_als300_playback_hw; 401 runtime->hw = snd_als300_playback_hw;
@@ -425,6 +427,8 @@ static int snd_als300_capture_open(struct snd_pcm_substream *substream)
425 struct snd_als300_substream_data *data = kzalloc(sizeof(*data), 427 struct snd_als300_substream_data *data = kzalloc(sizeof(*data),
426 GFP_KERNEL); 428 GFP_KERNEL);
427 429
430 if (!data)
431 return -ENOMEM;
428 snd_als300_dbgcallenter(); 432 snd_als300_dbgcallenter();
429 chip->capture_substream = substream; 433 chip->capture_substream = substream;
430 runtime->hw = snd_als300_capture_hw; 434 runtime->hw = snd_als300_capture_hw;
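The als300.c hunks add the missing NULL check on the kzalloc'd per-substream data. A minimal sketch of the pattern, with calloc standing in for kzalloc:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct substream_data {
        int period_flipped;
};

/* A zeroed allocation must be checked before it is stored and used;
 * bail out with -ENOMEM instead of crashing on a NULL pointer later. */
static int playback_open(struct substream_data **slot)
{
        struct substream_data *data = calloc(1, sizeof(*data));

        if (!data)
                return -ENOMEM;
        *slot = data;
        return 0;
}

int main(void)
{
        struct substream_data *sub = NULL;

        printf("open -> %d\n", playback_open(&sub));
        free(sub);
        return 0;
}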
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 48d9d609f89b..f7397ad02a0d 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5677,6 +5677,7 @@ static const struct hda_verb alc268_beep_init_verbs[] = {
5677 5677
5678enum { 5678enum {
5679 ALC268_FIXUP_INV_DMIC, 5679 ALC268_FIXUP_INV_DMIC,
5680 ALC268_FIXUP_HP_EAPD,
5680}; 5681};
5681 5682
5682static const struct alc_fixup alc268_fixups[] = { 5683static const struct alc_fixup alc268_fixups[] = {
@@ -5684,10 +5685,26 @@ static const struct alc_fixup alc268_fixups[] = {
5684 .type = ALC_FIXUP_FUNC, 5685 .type = ALC_FIXUP_FUNC,
5685 .v.func = alc_fixup_inv_dmic_0x12, 5686 .v.func = alc_fixup_inv_dmic_0x12,
5686 }, 5687 },
5688 [ALC268_FIXUP_HP_EAPD] = {
5689 .type = ALC_FIXUP_VERBS,
5690 .v.verbs = (const struct hda_verb[]) {
5691 {0x15, AC_VERB_SET_EAPD_BTLENABLE, 0},
5692 {}
5693 }
5694 },
5687}; 5695};
5688 5696
5689static const struct alc_model_fixup alc268_fixup_models[] = { 5697static const struct alc_model_fixup alc268_fixup_models[] = {
5690 {.id = ALC268_FIXUP_INV_DMIC, .name = "inv-dmic"}, 5698 {.id = ALC268_FIXUP_INV_DMIC, .name = "inv-dmic"},
5699 {.id = ALC268_FIXUP_HP_EAPD, .name = "hp-eapd"},
5700 {}
5701};
5702
5703static const struct snd_pci_quirk alc268_fixup_tbl[] = {
5704 /* below is codec SSID since multiple Toshiba laptops have the
5705 * same PCI SSID 1179:ff00
5706 */
5707 SND_PCI_QUIRK(0x1179, 0xff06, "Toshiba P200", ALC268_FIXUP_HP_EAPD),
5691 {} 5708 {}
5692}; 5709};
5693 5710
@@ -5722,7 +5739,7 @@ static int patch_alc268(struct hda_codec *codec)
5722 5739
5723 spec = codec->spec; 5740 spec = codec->spec;
5724 5741
5725 alc_pick_fixup(codec, alc268_fixup_models, NULL, alc268_fixups); 5742 alc_pick_fixup(codec, alc268_fixup_models, alc268_fixup_tbl, alc268_fixups);
5726 alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE); 5743 alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
5727 5744
5728 /* automatic parse from the BIOS config */ 5745 /* automatic parse from the BIOS config */
@@ -6188,6 +6205,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6188 SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE), 6205 SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE),
6189 SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK), 6206 SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK),
6190 SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK), 6207 SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK),
6208 SND_PCI_QUIRK(0x17aa, 0x21f3, "Thinkpad T430", ALC269_FIXUP_LENOVO_DOCK),
6191 SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK), 6209 SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
6192 SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK), 6210 SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
6193 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), 6211 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index b12308b5ba2a..f1cd1e387801 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -971,6 +971,7 @@ static inline void snd_hdspm_initialize_midi_flush(struct hdspm *hdspm);
971static int hdspm_update_simple_mixer_controls(struct hdspm *hdspm); 971static int hdspm_update_simple_mixer_controls(struct hdspm *hdspm);
972static int hdspm_autosync_ref(struct hdspm *hdspm); 972static int hdspm_autosync_ref(struct hdspm *hdspm);
973static int snd_hdspm_set_defaults(struct hdspm *hdspm); 973static int snd_hdspm_set_defaults(struct hdspm *hdspm);
974static int hdspm_system_clock_mode(struct hdspm *hdspm);
974static void hdspm_set_sgbuf(struct hdspm *hdspm, 975static void hdspm_set_sgbuf(struct hdspm *hdspm,
975 struct snd_pcm_substream *substream, 976 struct snd_pcm_substream *substream,
976 unsigned int reg, int channels); 977 unsigned int reg, int channels);
@@ -1989,10 +1990,14 @@ static int hdspm_get_system_sample_rate(struct hdspm *hdspm)
1989 rate = hdspm_calc_dds_value(hdspm, period); 1990 rate = hdspm_calc_dds_value(hdspm, period);
1990 1991
1991 if (rate > 207000) { 1992 if (rate > 207000) {
1992 /* Unreasonably high sample rate as seen on PCI MADI cards. 1993 /* Unreasonably high sample rate as seen on PCI MADI cards. */
1993 * Use the cached value instead. 1994 if (0 == hdspm_system_clock_mode(hdspm)) {
1994 */ 1995 /* master mode, return internal sample rate */
1995 rate = hdspm->system_sample_rate; 1996 rate = hdspm->system_sample_rate;
1997 } else {
1998 /* slave mode, return external sample rate */
1999 rate = hdspm_external_sample_rate(hdspm);
2000 }
1996 } 2001 }
1997 2002
1998 return rate; 2003 return rate;
@@ -2000,12 +2005,14 @@ static int hdspm_get_system_sample_rate(struct hdspm *hdspm)
2000 2005
2001 2006
2002#define HDSPM_SYSTEM_SAMPLE_RATE(xname, xindex) \ 2007#define HDSPM_SYSTEM_SAMPLE_RATE(xname, xindex) \
2003{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ 2008{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
2004 .name = xname, \ 2009 .name = xname, \
2005 .index = xindex, \ 2010 .index = xindex, \
2006 .access = SNDRV_CTL_ELEM_ACCESS_READ, \ 2011 .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |\
2007 .info = snd_hdspm_info_system_sample_rate, \ 2012 SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
2008 .get = snd_hdspm_get_system_sample_rate \ 2013 .info = snd_hdspm_info_system_sample_rate, \
2014 .put = snd_hdspm_put_system_sample_rate, \
2015 .get = snd_hdspm_get_system_sample_rate \
2009} 2016}
2010 2017
2011static int snd_hdspm_info_system_sample_rate(struct snd_kcontrol *kcontrol, 2018static int snd_hdspm_info_system_sample_rate(struct snd_kcontrol *kcontrol,
@@ -2030,6 +2037,16 @@ static int snd_hdspm_get_system_sample_rate(struct snd_kcontrol *kcontrol,
2030 return 0; 2037 return 0;
2031} 2038}
2032 2039
2040static int snd_hdspm_put_system_sample_rate(struct snd_kcontrol *kcontrol,
2041 struct snd_ctl_elem_value *
2042 ucontrol)
2043{
2044 struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
2045
2046 hdspm_set_dds_value(hdspm, ucontrol->value.enumerated.item[0]);
2047 return 0;
2048}
2049
2033 2050
2034/** 2051/**
2035 * Returns the WordClock sample rate class for the given card. 2052 * Returns the WordClock sample rate class for the given card.
@@ -2163,6 +2180,7 @@ static int snd_hdspm_get_autosync_sample_rate(struct snd_kcontrol *kcontrol,
2163 hdspm_get_s1_sample_rate(hdspm, 2180 hdspm_get_s1_sample_rate(hdspm,
2164 kcontrol->private_value-1); 2181 kcontrol->private_value-1);
2165 } 2182 }
2183 break;
2166 2184
2167 case AIO: 2185 case AIO:
2168 switch (kcontrol->private_value) { 2186 switch (kcontrol->private_value) {
@@ -2183,6 +2201,7 @@ static int snd_hdspm_get_autosync_sample_rate(struct snd_kcontrol *kcontrol,
2183 hdspm_get_s1_sample_rate(hdspm, 2201 hdspm_get_s1_sample_rate(hdspm,
2184 ucontrol->id.index-1); 2202 ucontrol->id.index-1);
2185 } 2203 }
2204 break;
2186 2205
2187 case AES32: 2206 case AES32:
2188 2207
@@ -2204,8 +2223,23 @@ static int snd_hdspm_get_autosync_sample_rate(struct snd_kcontrol *kcontrol,
2204 hdspm_get_s1_sample_rate(hdspm, 2223 hdspm_get_s1_sample_rate(hdspm,
2205 kcontrol->private_value-1); 2224 kcontrol->private_value-1);
2206 break; 2225 break;
2226 }
2227 break;
2207 2228
2229 case MADI:
2230 case MADIface:
2231 {
2232 int rate = hdspm_external_sample_rate(hdspm);
2233 int i, selected_rate = 0;
2234 for (i = 1; i < 10; i++)
2235 if (HDSPM_bit2freq(i) == rate) {
2236 selected_rate = i;
2237 break;
2238 }
2239 ucontrol->value.enumerated.item[0] = selected_rate;
2208 } 2240 }
2241 break;
2242
2209 default: 2243 default:
2210 break; 2244 break;
2211 } 2245 }
@@ -2430,7 +2464,7 @@ static int snd_hdspm_put_clock_source(struct snd_kcontrol *kcontrol,
2430 2464
2431 2465
2432#define HDSPM_PREF_SYNC_REF(xname, xindex) \ 2466#define HDSPM_PREF_SYNC_REF(xname, xindex) \
2433{.iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ 2467{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
2434 .name = xname, \ 2468 .name = xname, \
2435 .index = xindex, \ 2469 .index = xindex, \
2436 .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |\ 2470 .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |\
@@ -2766,12 +2800,12 @@ static int snd_hdspm_put_pref_sync_ref(struct snd_kcontrol *kcontrol,
2766 2800
2767 2801
2768#define HDSPM_AUTOSYNC_REF(xname, xindex) \ 2802#define HDSPM_AUTOSYNC_REF(xname, xindex) \
2769{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ 2803{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
2770 .name = xname, \ 2804 .name = xname, \
2771 .index = xindex, \ 2805 .index = xindex, \
2772 .access = SNDRV_CTL_ELEM_ACCESS_READ, \ 2806 .access = SNDRV_CTL_ELEM_ACCESS_READ, \
2773 .info = snd_hdspm_info_autosync_ref, \ 2807 .info = snd_hdspm_info_autosync_ref, \
2774 .get = snd_hdspm_get_autosync_ref, \ 2808 .get = snd_hdspm_get_autosync_ref, \
2775} 2809}
2776 2810
2777static int hdspm_autosync_ref(struct hdspm *hdspm) 2811static int hdspm_autosync_ref(struct hdspm *hdspm)
@@ -2855,12 +2889,12 @@ static int snd_hdspm_get_autosync_ref(struct snd_kcontrol *kcontrol,
2855 2889
2856 2890
2857#define HDSPM_LINE_OUT(xname, xindex) \ 2891#define HDSPM_LINE_OUT(xname, xindex) \
2858{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ 2892{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
2859 .name = xname, \ 2893 .name = xname, \
2860 .index = xindex, \ 2894 .index = xindex, \
2861 .info = snd_hdspm_info_line_out, \ 2895 .info = snd_hdspm_info_line_out, \
2862 .get = snd_hdspm_get_line_out, \ 2896 .get = snd_hdspm_get_line_out, \
2863 .put = snd_hdspm_put_line_out \ 2897 .put = snd_hdspm_put_line_out \
2864} 2898}
2865 2899
2866static int hdspm_line_out(struct hdspm * hdspm) 2900static int hdspm_line_out(struct hdspm * hdspm)
@@ -2912,12 +2946,12 @@ static int snd_hdspm_put_line_out(struct snd_kcontrol *kcontrol,
2912 2946
2913 2947
2914#define HDSPM_TX_64(xname, xindex) \ 2948#define HDSPM_TX_64(xname, xindex) \
2915{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ 2949{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
2916 .name = xname, \ 2950 .name = xname, \
2917 .index = xindex, \ 2951 .index = xindex, \
2918 .info = snd_hdspm_info_tx_64, \ 2952 .info = snd_hdspm_info_tx_64, \
2919 .get = snd_hdspm_get_tx_64, \ 2953 .get = snd_hdspm_get_tx_64, \
2920 .put = snd_hdspm_put_tx_64 \ 2954 .put = snd_hdspm_put_tx_64 \
2921} 2955}
2922 2956
2923static int hdspm_tx_64(struct hdspm * hdspm) 2957static int hdspm_tx_64(struct hdspm * hdspm)
@@ -2968,12 +3002,12 @@ static int snd_hdspm_put_tx_64(struct snd_kcontrol *kcontrol,
2968 3002
2969 3003
2970#define HDSPM_C_TMS(xname, xindex) \ 3004#define HDSPM_C_TMS(xname, xindex) \
2971{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ 3005{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
2972 .name = xname, \ 3006 .name = xname, \
2973 .index = xindex, \ 3007 .index = xindex, \
2974 .info = snd_hdspm_info_c_tms, \ 3008 .info = snd_hdspm_info_c_tms, \
2975 .get = snd_hdspm_get_c_tms, \ 3009 .get = snd_hdspm_get_c_tms, \
2976 .put = snd_hdspm_put_c_tms \ 3010 .put = snd_hdspm_put_c_tms \
2977} 3011}
2978 3012
2979static int hdspm_c_tms(struct hdspm * hdspm) 3013static int hdspm_c_tms(struct hdspm * hdspm)
@@ -3024,12 +3058,12 @@ static int snd_hdspm_put_c_tms(struct snd_kcontrol *kcontrol,
3024 3058
3025 3059
3026#define HDSPM_SAFE_MODE(xname, xindex) \ 3060#define HDSPM_SAFE_MODE(xname, xindex) \
3027{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ 3061{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
3028 .name = xname, \ 3062 .name = xname, \
3029 .index = xindex, \ 3063 .index = xindex, \
3030 .info = snd_hdspm_info_safe_mode, \ 3064 .info = snd_hdspm_info_safe_mode, \
3031 .get = snd_hdspm_get_safe_mode, \ 3065 .get = snd_hdspm_get_safe_mode, \
3032 .put = snd_hdspm_put_safe_mode \ 3066 .put = snd_hdspm_put_safe_mode \
3033} 3067}
3034 3068
3035static int hdspm_safe_mode(struct hdspm * hdspm) 3069static int hdspm_safe_mode(struct hdspm * hdspm)
@@ -3080,12 +3114,12 @@ static int snd_hdspm_put_safe_mode(struct snd_kcontrol *kcontrol,
3080 3114
3081 3115
3082#define HDSPM_EMPHASIS(xname, xindex) \ 3116#define HDSPM_EMPHASIS(xname, xindex) \
3083{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ 3117{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
3084 .name = xname, \ 3118 .name = xname, \
3085 .index = xindex, \ 3119 .index = xindex, \
3086 .info = snd_hdspm_info_emphasis, \ 3120 .info = snd_hdspm_info_emphasis, \
3087 .get = snd_hdspm_get_emphasis, \ 3121 .get = snd_hdspm_get_emphasis, \
3088 .put = snd_hdspm_put_emphasis \ 3122 .put = snd_hdspm_put_emphasis \
3089} 3123}
3090 3124
3091static int hdspm_emphasis(struct hdspm * hdspm) 3125static int hdspm_emphasis(struct hdspm * hdspm)
@@ -3136,12 +3170,12 @@ static int snd_hdspm_put_emphasis(struct snd_kcontrol *kcontrol,
3136 3170
3137 3171
3138#define HDSPM_DOLBY(xname, xindex) \ 3172#define HDSPM_DOLBY(xname, xindex) \
3139{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ 3173{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
3140 .name = xname, \ 3174 .name = xname, \
3141 .index = xindex, \ 3175 .index = xindex, \
3142 .info = snd_hdspm_info_dolby, \ 3176 .info = snd_hdspm_info_dolby, \
3143 .get = snd_hdspm_get_dolby, \ 3177 .get = snd_hdspm_get_dolby, \
3144 .put = snd_hdspm_put_dolby \ 3178 .put = snd_hdspm_put_dolby \
3145} 3179}
3146 3180
3147static int hdspm_dolby(struct hdspm * hdspm) 3181static int hdspm_dolby(struct hdspm * hdspm)
@@ -3192,12 +3226,12 @@ static int snd_hdspm_put_dolby(struct snd_kcontrol *kcontrol,
3192 3226
3193 3227
3194#define HDSPM_PROFESSIONAL(xname, xindex) \ 3228#define HDSPM_PROFESSIONAL(xname, xindex) \
3195{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ 3229{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
3196 .name = xname, \ 3230 .name = xname, \
3197 .index = xindex, \ 3231 .index = xindex, \
3198 .info = snd_hdspm_info_professional, \ 3232 .info = snd_hdspm_info_professional, \
3199 .get = snd_hdspm_get_professional, \ 3233 .get = snd_hdspm_get_professional, \
3200 .put = snd_hdspm_put_professional \ 3234 .put = snd_hdspm_put_professional \
3201} 3235}
3202 3236
3203static int hdspm_professional(struct hdspm * hdspm) 3237static int hdspm_professional(struct hdspm * hdspm)
@@ -3247,12 +3281,12 @@ static int snd_hdspm_put_professional(struct snd_kcontrol *kcontrol,
3247} 3281}
3248 3282
3249#define HDSPM_INPUT_SELECT(xname, xindex) \ 3283#define HDSPM_INPUT_SELECT(xname, xindex) \
3250{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ 3284{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
3251 .name = xname, \ 3285 .name = xname, \
3252 .index = xindex, \ 3286 .index = xindex, \
3253 .info = snd_hdspm_info_input_select, \ 3287 .info = snd_hdspm_info_input_select, \
3254 .get = snd_hdspm_get_input_select, \ 3288 .get = snd_hdspm_get_input_select, \
3255 .put = snd_hdspm_put_input_select \ 3289 .put = snd_hdspm_put_input_select \
3256} 3290}
3257 3291
3258static int hdspm_input_select(struct hdspm * hdspm) 3292static int hdspm_input_select(struct hdspm * hdspm)
@@ -3319,12 +3353,12 @@ static int snd_hdspm_put_input_select(struct snd_kcontrol *kcontrol,
3319 3353
3320 3354
3321#define HDSPM_DS_WIRE(xname, xindex) \ 3355#define HDSPM_DS_WIRE(xname, xindex) \
3322{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ 3356{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
3323 .name = xname, \ 3357 .name = xname, \
3324 .index = xindex, \ 3358 .index = xindex, \
3325 .info = snd_hdspm_info_ds_wire, \ 3359 .info = snd_hdspm_info_ds_wire, \
3326 .get = snd_hdspm_get_ds_wire, \ 3360 .get = snd_hdspm_get_ds_wire, \
3327 .put = snd_hdspm_put_ds_wire \ 3361 .put = snd_hdspm_put_ds_wire \
3328} 3362}
3329 3363
3330static int hdspm_ds_wire(struct hdspm * hdspm) 3364static int hdspm_ds_wire(struct hdspm * hdspm)
@@ -3391,12 +3425,12 @@ static int snd_hdspm_put_ds_wire(struct snd_kcontrol *kcontrol,
3391 3425
3392 3426
3393#define HDSPM_QS_WIRE(xname, xindex) \ 3427#define HDSPM_QS_WIRE(xname, xindex) \
3394{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ 3428{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
3395 .name = xname, \ 3429 .name = xname, \
3396 .index = xindex, \ 3430 .index = xindex, \
3397 .info = snd_hdspm_info_qs_wire, \ 3431 .info = snd_hdspm_info_qs_wire, \
3398 .get = snd_hdspm_get_qs_wire, \ 3432 .get = snd_hdspm_get_qs_wire, \
3399 .put = snd_hdspm_put_qs_wire \ 3433 .put = snd_hdspm_put_qs_wire \
3400} 3434}
3401 3435
3402static int hdspm_qs_wire(struct hdspm * hdspm) 3436static int hdspm_qs_wire(struct hdspm * hdspm)
@@ -3563,15 +3597,15 @@ static int snd_hdspm_put_madi_speedmode(struct snd_kcontrol *kcontrol,
3563} 3597}
3564 3598
3565#define HDSPM_MIXER(xname, xindex) \ 3599#define HDSPM_MIXER(xname, xindex) \
3566{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \ 3600{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \
3567 .name = xname, \ 3601 .name = xname, \
3568 .index = xindex, \ 3602 .index = xindex, \
3569 .device = 0, \ 3603 .device = 0, \
3570 .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | \ 3604 .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | \
3571 SNDRV_CTL_ELEM_ACCESS_VOLATILE, \ 3605 SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
3572 .info = snd_hdspm_info_mixer, \ 3606 .info = snd_hdspm_info_mixer, \
3573 .get = snd_hdspm_get_mixer, \ 3607 .get = snd_hdspm_get_mixer, \
3574 .put = snd_hdspm_put_mixer \ 3608 .put = snd_hdspm_put_mixer \
3575} 3609}
3576 3610
3577static int snd_hdspm_info_mixer(struct snd_kcontrol *kcontrol, 3611static int snd_hdspm_info_mixer(struct snd_kcontrol *kcontrol,
@@ -3670,12 +3704,12 @@ static int snd_hdspm_put_mixer(struct snd_kcontrol *kcontrol,
3670*/ 3704*/
3671 3705
3672#define HDSPM_PLAYBACK_MIXER \ 3706#define HDSPM_PLAYBACK_MIXER \
3673{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ 3707{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
3674 .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_WRITE | \ 3708 .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_WRITE | \
3675 SNDRV_CTL_ELEM_ACCESS_VOLATILE, \ 3709 SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
3676 .info = snd_hdspm_info_playback_mixer, \ 3710 .info = snd_hdspm_info_playback_mixer, \
3677 .get = snd_hdspm_get_playback_mixer, \ 3711 .get = snd_hdspm_get_playback_mixer, \
3678 .put = snd_hdspm_put_playback_mixer \ 3712 .put = snd_hdspm_put_playback_mixer \
3679} 3713}
3680 3714
3681static int snd_hdspm_info_playback_mixer(struct snd_kcontrol *kcontrol, 3715static int snd_hdspm_info_playback_mixer(struct snd_kcontrol *kcontrol,
@@ -3851,12 +3885,17 @@ static int hdspm_sync_in_sync_check(struct hdspm *hdspm)
 		break;
 
 	case MADI:
-	case AES32:
-		status = hdspm_read(hdspm, HDSPM_statusRegister2);
+		status = hdspm_read(hdspm, HDSPM_statusRegister);
 		lock = (status & HDSPM_syncInLock) ? 1 : 0;
 		sync = (status & HDSPM_syncInSync) ? 1 : 0;
 		break;
 
+	case AES32:
+		status = hdspm_read(hdspm, HDSPM_statusRegister2);
+		lock = (status & 0x100000) ? 1 : 0;
+		sync = (status & 0x200000) ? 1 : 0;
+		break;
+
 	case MADIface:
 		break;
 	}
@@ -3942,6 +3981,7 @@ static int snd_hdspm_get_sync_check(struct snd_kcontrol *kcontrol,
3942 default: 3981 default:
3943 val = hdspm_s1_sync_check(hdspm, ucontrol->id.index-1); 3982 val = hdspm_s1_sync_check(hdspm, ucontrol->id.index-1);
3944 } 3983 }
3984 break;
3945 3985
3946 case AIO: 3986 case AIO:
3947 switch (kcontrol->private_value) { 3987 switch (kcontrol->private_value) {
@@ -3954,6 +3994,7 @@ static int snd_hdspm_get_sync_check(struct snd_kcontrol *kcontrol,
3954 default: 3994 default:
3955 val = hdspm_s1_sync_check(hdspm, ucontrol->id.index-1); 3995 val = hdspm_s1_sync_check(hdspm, ucontrol->id.index-1);
3956 } 3996 }
3997 break;
3957 3998
3958 case MADI: 3999 case MADI:
3959 switch (kcontrol->private_value) { 4000 switch (kcontrol->private_value) {
@@ -3966,6 +4007,7 @@ static int snd_hdspm_get_sync_check(struct snd_kcontrol *kcontrol,
3966 case 3: /* SYNC_IN */ 4007 case 3: /* SYNC_IN */
3967 val = hdspm_sync_in_sync_check(hdspm); break; 4008 val = hdspm_sync_in_sync_check(hdspm); break;
3968 } 4009 }
4010 break;
3969 4011
3970 case MADIface: 4012 case MADIface:
3971 val = hdspm_madi_sync_check(hdspm); /* MADI */ 4013 val = hdspm_madi_sync_check(hdspm); /* MADI */
@@ -3983,6 +4025,7 @@ static int snd_hdspm_get_sync_check(struct snd_kcontrol *kcontrol,
3983 val = hdspm_aes_sync_check(hdspm, 4025 val = hdspm_aes_sync_check(hdspm,
3984 kcontrol->private_value-1); 4026 kcontrol->private_value-1);
3985 } 4027 }
4028 break;
3986 4029
3987 } 4030 }
3988 4031
@@ -4427,9 +4470,10 @@ static struct snd_kcontrol_new snd_hdspm_controls_madi[] = {
 	HDSPM_PREF_SYNC_REF("Preferred Sync Reference", 0),
 	HDSPM_AUTOSYNC_REF("AutoSync Reference", 0),
 	HDSPM_SYSTEM_SAMPLE_RATE("System Sample Rate", 0),
+	HDSPM_AUTOSYNC_SAMPLE_RATE("External Rate", 0),
 	HDSPM_SYNC_CHECK("WC SyncCheck", 0),
 	HDSPM_SYNC_CHECK("MADI SyncCheck", 1),
-	HDSPM_SYNC_CHECK("TCO SyncCHeck", 2),
+	HDSPM_SYNC_CHECK("TCO SyncCheck", 2),
 	HDSPM_SYNC_CHECK("SYNC IN SyncCheck", 3),
 	HDSPM_LINE_OUT("Line Out", 0),
 	HDSPM_TX_64("TX 64 channels mode", 0),
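
Note on the new "External Rate" control added above: its MADI/MADIface get path reports the detected external rate as an enum index, found by scanning HDSPM_bit2freq() for a match. A minimal, self-contained sketch of that reverse lookup follows; the rate table here is illustrative only and is not the driver's actual HDSPM_bit2freq() implementation.

#include <stdio.h>

/* Illustrative stand-in for HDSPM_bit2freq(); index 0 means "no match". */
static const int hdspm_freqs[10] = {
	0, 32000, 44100, 48000, 64000, 88200, 96000, 128000, 176400, 192000
};

/* Map a measured rate in Hz to its enum index, 0 if nothing matches. */
static int rate_to_index(int rate)
{
	int i;

	for (i = 1; i < 10; i++)
		if (hdspm_freqs[i] == rate)
			return i;
	return 0;
}

int main(void)
{
	printf("48000 Hz -> index %d\n", rate_to_index(48000));
	printf("50000 Hz -> index %d\n", rate_to_index(50000));
	return 0;
}
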
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 2b2dadc54dac..3fddc7ad1127 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -1045,6 +1045,7 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w,
1045 struct snd_kcontrol *kcontrol, int event) 1045 struct snd_kcontrol *kcontrol, int event)
1046{ 1046{
1047 struct snd_soc_codec *codec = w->codec; 1047 struct snd_soc_codec *codec = w->codec;
1048 struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
1048 struct wm8994 *control = codec->control_data; 1049 struct wm8994 *control = codec->control_data;
1049 int mask = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA; 1050 int mask = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA;
1050 int i; 1051 int i;
@@ -1063,6 +1064,10 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w,
1063 1064
1064 switch (event) { 1065 switch (event) {
1065 case SND_SOC_DAPM_PRE_PMU: 1066 case SND_SOC_DAPM_PRE_PMU:
1067 /* Don't enable timeslot 2 if not in use */
1068 if (wm8994->channels[0] <= 2)
1069 mask &= ~(WM8994_AIF1DAC2L_ENA | WM8994_AIF1DAC2R_ENA);
1070
1066 val = snd_soc_read(codec, WM8994_AIF1_CONTROL_1); 1071 val = snd_soc_read(codec, WM8994_AIF1_CONTROL_1);
1067 if ((val & WM8994_AIF1ADCL_SRC) && 1072 if ((val & WM8994_AIF1ADCL_SRC) &&
1068 (val & WM8994_AIF1ADCR_SRC)) 1073 (val & WM8994_AIF1ADCR_SRC))
@@ -2687,7 +2692,7 @@ static int wm8994_hw_params(struct snd_pcm_substream *substream,
 		return -EINVAL;
 	}
 
-	bclk_rate = params_rate(params) * 4;
+	bclk_rate = params_rate(params);
 	switch (params_format(params)) {
 	case SNDRV_PCM_FORMAT_S16_LE:
 		bclk_rate *= 16;
@@ -2708,6 +2713,17 @@ static int wm8994_hw_params(struct snd_pcm_substream *substream,
2708 return -EINVAL; 2713 return -EINVAL;
2709 } 2714 }
2710 2715
2716 wm8994->channels[id] = params_channels(params);
2717 switch (params_channels(params)) {
2718 case 1:
2719 case 2:
2720 bclk_rate *= 2;
2721 break;
2722 default:
2723 bclk_rate *= 4;
2724 break;
2725 }
2726
2711 /* Try to find an appropriate sample rate; look for an exact match. */ 2727 /* Try to find an appropriate sample rate; look for an exact match. */
2712 for (i = 0; i < ARRAY_SIZE(srs); i++) 2728 for (i = 0; i < ARRAY_SIZE(srs); i++)
2713 if (srs[i].rate == params_rate(params)) 2729 if (srs[i].rate == params_rate(params))
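
Note on the hw_params change above: it stops hard-coding four BCLK time slots. BCLK is now built up as sample rate, times the slot count implied by the channel count (two for mono/stereo, four otherwise), times the word length from the sample format (16 for the S16_LE case visible in the hunk). A rough standalone sketch of that arithmetic, with names invented for illustration:

#include <stdio.h>

/* Sketch of the BCLK computation: rate * slots * bits-per-sample. */
static unsigned int bclk_sketch(unsigned int rate, unsigned int channels,
				unsigned int bits)
{
	unsigned int bclk = rate;

	/* Two slots cover mono/stereo; more channels need four slots. */
	bclk *= (channels <= 2) ? 2 : 4;
	bclk *= bits;		/* e.g. 16 for S16_LE */
	return bclk;
}

int main(void)
{
	/* 48 kHz stereo, 16-bit: 48000 * 2 * 16 = 1536000 Hz BCLK. */
	printf("%u\n", bclk_sketch(48000, 2, 16));
	return 0;
}
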
diff --git a/sound/soc/codecs/wm8994.h b/sound/soc/codecs/wm8994.h
index f142ec198db3..ccbce5791e95 100644
--- a/sound/soc/codecs/wm8994.h
+++ b/sound/soc/codecs/wm8994.h
@@ -77,6 +77,7 @@ struct wm8994_priv {
77 int sysclk_rate[2]; 77 int sysclk_rate[2];
78 int mclk[2]; 78 int mclk[2];
79 int aifclk[2]; 79 int aifclk[2];
80 int channels[2];
80 struct wm8994_fll_config fll[2], fll_suspend[2]; 81 struct wm8994_fll_config fll[2], fll_suspend[2];
81 struct completion fll_locked[2]; 82 struct completion fll_locked[2];
82 bool fll_locked_irq; 83 bool fll_locked_irq;
diff --git a/sound/soc/ux500/mop500.c b/sound/soc/ux500/mop500.c
index 356611d9654d..54f7e25b6f7d 100644
--- a/sound/soc/ux500/mop500.c
+++ b/sound/soc/ux500/mop500.c
@@ -57,6 +57,20 @@ static struct snd_soc_card mop500_card = {
57 .num_links = ARRAY_SIZE(mop500_dai_links), 57 .num_links = ARRAY_SIZE(mop500_dai_links),
58}; 58};
59 59
60static void mop500_of_node_put(void)
61{
62 int i;
63
64 for (i = 0; i < 2; i++) {
65 if (mop500_dai_links[i].cpu_of_node)
66 of_node_put((struct device_node *)
67 mop500_dai_links[i].cpu_of_node);
68 if (mop500_dai_links[i].codec_of_node)
69 of_node_put((struct device_node *)
70 mop500_dai_links[i].codec_of_node);
71 }
72}
73
60static int __devinit mop500_of_probe(struct platform_device *pdev, 74static int __devinit mop500_of_probe(struct platform_device *pdev,
61 struct device_node *np) 75 struct device_node *np)
62{ 76{
@@ -69,6 +83,7 @@ static int __devinit mop500_of_probe(struct platform_device *pdev,
69 83
70 if (!(msp_np[0] && msp_np[1] && codec_np)) { 84 if (!(msp_np[0] && msp_np[1] && codec_np)) {
71 dev_err(&pdev->dev, "Phandle missing or invalid\n"); 85 dev_err(&pdev->dev, "Phandle missing or invalid\n");
86 mop500_of_node_put();
72 return -EINVAL; 87 return -EINVAL;
73 } 88 }
74 89
@@ -83,6 +98,7 @@ static int __devinit mop500_of_probe(struct platform_device *pdev,
83 98
84 return 0; 99 return 0;
85} 100}
101
86static int __devinit mop500_probe(struct platform_device *pdev) 102static int __devinit mop500_probe(struct platform_device *pdev)
87{ 103{
88 struct device_node *np = pdev->dev.of_node; 104 struct device_node *np = pdev->dev.of_node;
@@ -128,6 +144,7 @@ static int __devexit mop500_remove(struct platform_device *pdev)
128 144
129 snd_soc_unregister_card(mop500_card); 145 snd_soc_unregister_card(mop500_card);
130 mop500_ab8500_remove(mop500_card); 146 mop500_ab8500_remove(mop500_card);
147 mop500_of_node_put();
131 148
132 return 0; 149 return 0;
133} 150}
diff --git a/sound/soc/ux500/ux500_msp_i2s.c b/sound/soc/ux500/ux500_msp_i2s.c
index b7c996e77570..a26c6bf0a29b 100644
--- a/sound/soc/ux500/ux500_msp_i2s.c
+++ b/sound/soc/ux500/ux500_msp_i2s.c
@@ -18,6 +18,7 @@
18#include <linux/pinctrl/consumer.h> 18#include <linux/pinctrl/consumer.h>
19#include <linux/delay.h> 19#include <linux/delay.h>
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/io.h>
21#include <linux/of.h> 22#include <linux/of.h>
22 23
23#include <mach/hardware.h> 24#include <mach/hardware.h>
@@ -697,14 +698,11 @@ int ux500_msp_i2s_init_msp(struct platform_device *pdev,
 		platform_data = devm_kzalloc(&pdev->dev,
 			sizeof(struct msp_i2s_platform_data), GFP_KERNEL);
 		if (!platform_data)
-			ret = -ENOMEM;
+			return -ENOMEM;
 		}
 	} else
 		if (!platform_data)
-			ret = -EINVAL;
-
-	if (ret)
-		goto err_res;
+			return -EINVAL;
 
 	dev_dbg(&pdev->dev, "%s: Enter (name: %s, id: %d).\n", __func__,
 		pdev->name, platform_data->id);
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index 411ee5664e98..178b88ae3d2f 100644
--- a/tools/perf/builtin-help.c
+++ b/tools/perf/builtin-help.c
@@ -414,7 +414,7 @@ static int show_html_page(const char *perf_cmd)
 int cmd_help(int argc, const char **argv, const char *prefix __maybe_unused)
 {
 	bool show_all = false;
-	enum help_format help_format = HELP_FORMAT_NONE;
+	enum help_format help_format = HELP_FORMAT_MAN;
 	struct option builtin_help_options[] = {
 	OPT_BOOLEAN('a', "all", &show_all, "print all available commands"),
 	OPT_SET_UINT('m', "man", &help_format, "show man page", HELP_FORMAT_MAN),
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index dec8ced61fb0..7aaee39f6774 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -56,6 +56,10 @@ static int trace__read_syscall_info(struct trace *trace, int id)
56{ 56{
57 char tp_name[128]; 57 char tp_name[128];
58 struct syscall *sc; 58 struct syscall *sc;
59 const char *name = audit_syscall_to_name(id, trace->audit_machine);
60
61 if (name == NULL)
62 return -1;
59 63
60 if (id > trace->syscalls.max) { 64 if (id > trace->syscalls.max) {
61 struct syscall *nsyscalls = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc)); 65 struct syscall *nsyscalls = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));
@@ -75,11 +79,8 @@ static int trace__read_syscall_info(struct trace *trace, int id)
 	}
 
 	sc = trace->syscalls.table + id;
-	sc->name = audit_syscall_to_name(id, trace->audit_machine);
-	if (sc->name == NULL)
-		return -1;
-
-	sc->fmt = syscall_fmt__find(sc->name);
+	sc->name = name;
+	sc->fmt = syscall_fmt__find(sc->name);
 
 	snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
 	sc->tp_format = event_format__new("syscalls", tp_name);
@@ -267,6 +268,13 @@ again:
267 if (evlist->threads->map[0] == -1 || evlist->threads->nr > 1) 268 if (evlist->threads->map[0] == -1 || evlist->threads->nr > 1)
268 printf("%d ", sample.tid); 269 printf("%d ", sample.tid);
269 270
271 if (sample.raw_data == NULL) {
272 printf("%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
273 perf_evsel__name(evsel), sample.tid,
274 sample.cpu, sample.raw_size);
275 continue;
276 }
277
270 handler = evsel->handler.func; 278 handler = evsel->handler.func;
271 handler(trace, evsel, &sample); 279 handler(trace, evsel, &sample);
272 } 280 }
diff --git a/tools/perf/util/parse-events-test.c b/tools/perf/util/parse-events-test.c
index 28c18d1d52c3..516ecd9ddd6e 100644
--- a/tools/perf/util/parse-events-test.c
+++ b/tools/perf/util/parse-events-test.c
@@ -513,7 +513,8 @@ static int test__group1(struct perf_evlist *evlist)
 	TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
 	TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
 	TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
-	TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+	/* use of precise requires exclude_guest */
+	TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
 	TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
 	TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 2);
 	TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
@@ -599,7 +600,8 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused)
 	TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
 	TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
 	TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
-	TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+	/* use of precise requires exclude_guest */
+	TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
 	TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
 	TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 3);
 	TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
@@ -662,7 +664,8 @@ static int test__group4(struct perf_evlist *evlist __maybe_unused)
 	TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
 	TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
 	TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
-	TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+	/* use of precise requires exclude_guest */
+	TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
 	TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
 	TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 1);
 	TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
@@ -676,7 +679,8 @@ static int test__group4(struct perf_evlist *evlist __maybe_unused)
 	TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
 	TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
 	TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
-	TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+	/* use of precise requires exclude_guest */
+	TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
 	TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
 	TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 2);
 	TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index fb4b7ea6752f..8b3e5939afb6 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -39,7 +39,6 @@ int thread__set_comm(struct thread *self, const char *comm)
39 err = self->comm == NULL ? -ENOMEM : 0; 39 err = self->comm == NULL ? -ENOMEM : 0;
40 if (!err) { 40 if (!err) {
41 self->comm_set = true; 41 self->comm_set = true;
42 map_groups__flush(&self->mg);
43 } 42 }
44 return err; 43 return err;
45} 44}
diff --git a/tools/testing/selftests/epoll/test_epoll.c b/tools/testing/selftests/epoll/test_epoll.c
index e0fcff1e8331..f7525392ce84 100644
--- a/tools/testing/selftests/epoll/test_epoll.c
+++ b/tools/testing/selftests/epoll/test_epoll.c
@@ -162,14 +162,14 @@ void *write_thread_function(void *function_data)
 	int index;
 	struct write_thread_data *thread_data =
 		(struct write_thread_data *)function_data;
-	while (!write_thread_data->stop)
+	while (!thread_data->stop)
 		for (index = 0;
 		     !thread_data->stop && (index < thread_data->n_fds);
 		     ++index)
 			if ((write(thread_data->fds[index], &data, 1) < 1) &&
 				(errno != EAGAIN) &&
 				(errno != EWOULDBLOCK)) {
-				write_thread_data->status = errno;
+				thread_data->status = errno;
 				return;
 			}
 }
diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c
index cd1b03e80899..b76edf2f8333 100644
--- a/tools/vm/page-types.c
+++ b/tools/vm/page-types.c
@@ -35,7 +35,7 @@
 #include <sys/mount.h>
 #include <sys/statfs.h>
 #include "../../include/uapi/linux/magic.h"
-#include "../../include/linux/kernel-page-flags.h"
+#include "../../include/uapi/linux/kernel-page-flags.h"
 
 
 #ifndef MAX_PATH
diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
index af0f22fb1ef7..aca6edcbbc6f 100644
--- a/usr/gen_init_cpio.c
+++ b/usr/gen_init_cpio.c
@@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name, const char *location,
303 int retval; 303 int retval;
304 int rc = -1; 304 int rc = -1;
305 int namesize; 305 int namesize;
306 int i; 306 unsigned int i;
307 307
308 mode |= S_IFREG; 308 mode |= S_IFREG;
309 309
@@ -381,25 +381,28 @@ error:
 
 static char *cpio_replace_env(char *new_location)
 {
 	char expanded[PATH_MAX + 1];
 	char env_var[PATH_MAX + 1];
 	char *start;
 	char *end;
 
 	for (start = NULL; (start = strstr(new_location, "${")); ) {
 		end = strchr(start, '}');
 		if (start < end) {
 			*env_var = *expanded = '\0';
 			strncat(env_var, start + 2, end - start - 2);
 			strncat(expanded, new_location, start - new_location);
-			strncat(expanded, getenv(env_var), PATH_MAX);
-			strncat(expanded, end + 1, PATH_MAX);
-			strncpy(new_location, expanded, PATH_MAX);
-		} else
-			break;
-	}
-
-	return new_location;
+			strncat(expanded, getenv(env_var),
+				PATH_MAX - strlen(expanded));
+			strncat(expanded, end + 1,
+				PATH_MAX - strlen(expanded));
+			strncpy(new_location, expanded, PATH_MAX);
+			new_location[PATH_MAX] = 0;
+		} else
+			break;
+	}
+
+	return new_location;
 }
 
 
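Note on the gen_init_cpio change above: strncat()'s length argument bounds the bytes appended, not the total destination size, so each successive append must subtract the length already in the buffer. A small sketch of that pattern in isolation; the buffer size and strings are made up for illustration.

#include <stdio.h>
#include <string.h>

#define BUF_MAX 15	/* illustrative capacity, excluding the NUL */

int main(void)
{
	char buf[BUF_MAX + 1] = "";

	/* Each append is limited to the space actually left in buf. */
	strncat(buf, "/usr/share/", BUF_MAX - strlen(buf));
	strncat(buf, "initramfs-files", BUF_MAX - strlen(buf));
	buf[BUF_MAX] = 0;	/* belt and braces, as in the patch */

	printf("%s\n", buf);	/* safely truncated to 15 bytes */
	return 0;
}
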
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index e59bb63cb089..be70035fd42a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1322,9 +1322,7 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
 
 void kvm_release_pfn_clean(pfn_t pfn)
 {
-	WARN_ON(is_error_pfn(pfn));
-
-	if (!kvm_is_mmio_pfn(pfn))
+	if (!is_error_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
 		put_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);