-rw-r--r--  Documentation/cpu-freq/intel-pstate.txt | 37
-rw-r--r--  Documentation/kernel-parameters.txt | 12
-rw-r--r--  Documentation/networking/timestamping.txt | 2
-rw-r--r--  MAINTAINERS | 6
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/boot/dts/exynos5250-snow.dts | 4
-rw-r--r--  arch/arm/boot/dts/exynos5250.dtsi | 2
-rw-r--r--  arch/arm/configs/exynos_defconfig | 2
-rw-r--r--  arch/arm/include/asm/thread_info.h | 11
-rw-r--r--  arch/arm/kernel/traps.c | 31
-rw-r--r--  arch/arm/kvm/mmu.c | 10
-rw-r--r--  arch/arm/mach-mvebu/coherency.c | 2
-rw-r--r--  arch/arm/mach-tegra/irq.c | 22
-rw-r--r--  arch/arm/mm/proc-v7.S | 2
-rw-r--r--  arch/arm/mm/proc-xscale.S | 4
-rw-r--r--  arch/arm64/kvm/sys_regs.c | 9
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c | 2
-rw-r--r--  arch/mips/Kconfig | 14
-rw-r--r--  arch/mips/include/asm/mipsregs.h | 2
-rw-r--r--  arch/mips/include/asm/r4kcache.h | 4
-rw-r--r--  arch/mips/include/asm/uaccess.h | 6
-rw-r--r--  arch/mips/include/uapi/asm/unistd.h | 2
-rw-r--r--  arch/mips/kernel/bmips_vec.S | 3
-rw-r--r--  arch/mips/kernel/cps-vec.S | 2
-rw-r--r--  arch/mips/kernel/cpu-probe.c | 33
-rw-r--r--  arch/mips/kernel/rtlx.c | 4
-rw-r--r--  arch/mips/kernel/setup.c | 4
-rw-r--r--  arch/mips/kernel/signal.c | 8
-rw-r--r--  arch/mips/loongson/common/Makefile | 3
-rw-r--r--  arch/mips/mm/tlbex.c | 10
-rw-r--r--  arch/mips/mti-sead3/sead3-leds.c | 8
-rw-r--r--  arch/mips/netlogic/xlp/Makefile | 12
-rw-r--r--  arch/powerpc/include/asm/pci-bridge.h | 2
-rw-r--r--  arch/powerpc/kernel/eeh_sysfs.c | 2
-rw-r--r--  arch/powerpc/kernel/pci_64.c | 10
-rw-r--r--  arch/powerpc/kernel/vdso32/getcpu.S | 4
-rw-r--r--  arch/powerpc/platforms/powernv/opal-hmi.c | 2
-rw-r--r--  arch/powerpc/platforms/powernv/pci-ioda.c | 5
-rw-r--r--  arch/powerpc/platforms/powernv/pci.c | 3
-rw-r--r--  arch/powerpc/platforms/pseries/msi.c | 2
-rw-r--r--  arch/powerpc/xmon/xmon.c | 6
-rw-r--r--  arch/sparc/include/asm/dma-mapping.h | 8
-rw-r--r--  arch/x86/include/asm/cpufeature.h | 5
-rw-r--r--  arch/x86/include/uapi/asm/msr-index.h | 41
-rw-r--r--  arch/x86/kernel/cpu/scattered.c | 5
-rw-r--r--  arch/x86/kvm/mmu.c | 6
-rw-r--r--  drivers/atm/solos-pci.c | 2
-rw-r--r--  drivers/clk/at91/clk-usb.c | 35
-rw-r--r--  drivers/clk/clk-divider.c | 18
-rw-r--r--  drivers/clk/pxa/clk-pxa27x.c | 4
-rw-r--r--  drivers/clk/qcom/mmcc-apq8084.c | 2
-rw-r--r--  drivers/clk/rockchip/clk.c | 4
-rw-r--r--  drivers/cpufreq/Kconfig | 57
-rw-r--r--  drivers/cpufreq/Kconfig.arm | 8
-rw-r--r--  drivers/cpufreq/Makefile | 4
-rw-r--r--  drivers/cpufreq/arm_big_little.c | 7
-rw-r--r--  drivers/cpufreq/arm_big_little.h | 5
-rw-r--r--  drivers/cpufreq/arm_big_little_dt.c | 1
-rw-r--r--  drivers/cpufreq/cpufreq-dt.c | 66
-rw-r--r--  drivers/cpufreq/cpufreq.c | 53
-rw-r--r--  drivers/cpufreq/exynos5440-cpufreq.c | 5
-rw-r--r--  drivers/cpufreq/imx6q-cpufreq.c | 11
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 182
-rw-r--r--  drivers/cpufreq/ls1x-cpufreq.c | 223
-rw-r--r--  drivers/cpufreq/pcc-cpufreq.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 19
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 10
-rw-r--r--  drivers/hwmon/g762.c | 6
-rw-r--r--  drivers/iio/accel/bmc150-accel.c | 40
-rw-r--r--  drivers/iio/accel/kxcjk-1013.c | 2
-rw-r--r--  drivers/iio/adc/men_z188_adc.c | 1
-rw-r--r--  drivers/iio/gyro/bmg160.c | 53
-rw-r--r--  drivers/input/joystick/xpad.c | 16
-rw-r--r--  drivers/input/mouse/elantech.c | 10
-rw-r--r--  drivers/input/mouse/synaptics.c | 4
-rw-r--r--  drivers/irqchip/irq-atmel-aic-common.c | 6
-rw-r--r--  drivers/irqchip/irq-bcm7120-l2.c | 4
-rw-r--r--  drivers/irqchip/irq-brcmstb-l2.c | 4
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 58
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 3
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 5
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 23
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 13
-rw-r--r--  drivers/net/vxlan.c | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-fw.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac80211.c | 12
-rw-r--r--  drivers/net/wireless/rtlwifi/pci.c | 20
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8821ae/hw.c | 5
-rw-r--r--  drivers/net/xen-netback/xenbus.c | 15
-rw-r--r--  drivers/pci/msi.c | 26
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 7
-rw-r--r--  drivers/scsi/scsi_devinfo.c | 1
-rw-r--r--  drivers/scsi/ufs/ufshcd-pltfrm.c | 15
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 104
-rw-r--r--  drivers/scsi/ufs/ufshcd.h | 2
-rw-r--r--  drivers/spi/spi-dw.c | 6
-rw-r--r--  drivers/spi/spi-sirf.c | 4
-rw-r--r--  drivers/spi/spi.c | 6
-rw-r--r--  drivers/staging/rtl8188eu/core/rtw_cmd.c | 22
-rw-r--r--  drivers/staging/rtl8188eu/core/rtw_mlme_ext.c | 12
-rw-r--r--  drivers/staging/rtl8188eu/core/rtw_wlan_util.c | 2
-rw-r--r--  drivers/staging/rtl8188eu/os_dep/usb_intf.c | 1
-rw-r--r--  drivers/thermal/cpu_cooling.c | 37
-rw-r--r--  drivers/thermal/samsung/exynos_thermal_common.c | 9
-rw-r--r--  drivers/thermal/st/st_thermal.c | 3
-rw-r--r--  drivers/tty/serial/of_serial.c | 27
-rw-r--r--  drivers/usb/core/quirks.c | 3
-rw-r--r--  drivers/usb/dwc3/ep0.c | 8
-rw-r--r--  drivers/usb/host/xhci-hub.c | 5
-rw-r--r--  drivers/usb/host/xhci-pci.c | 2
-rw-r--r--  drivers/usb/host/xhci-plat.c | 10
-rw-r--r--  drivers/usb/host/xhci-ring.c | 43
-rw-r--r--  drivers/usb/host/xhci.c | 107
-rw-r--r--  drivers/usb/host/xhci.h | 2
-rw-r--r--  drivers/usb/serial/cp210x.c | 1
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 33
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 39
-rw-r--r--  drivers/usb/serial/keyspan.c | 97
-rw-r--r--  drivers/usb/serial/ssu100.c | 11
-rw-r--r--  drivers/usb/storage/unusual_uas.h | 7
-rw-r--r--  fs/aio.c | 21
-rw-r--r--  fs/btrfs/compression.c | 33
-rw-r--r--  fs/btrfs/compression.h | 4
-rw-r--r--  fs/btrfs/lzo.c | 15
-rw-r--r--  fs/btrfs/zlib.c | 20
-rw-r--r--  fs/nfsd/nfs4callback.c | 8
-rw-r--r--  fs/nfsd/nfsd.h | 9
-rw-r--r--  include/dt-bindings/clock/qcom,mmcc-apq8084.h | 2
-rw-r--r--  include/linux/clk-provider.h | 1
-rw-r--r--  include/linux/cpufreq.h | 54
-rw-r--r--  include/linux/iio/events.h | 2
-rw-r--r--  include/linux/kvm_host.h | 2
-rw-r--r--  include/linux/pci.h | 1
-rw-r--r--  include/net/inet_common.h | 2
-rw-r--r--  include/sound/pcm.h | 2
-rw-r--r--  include/uapi/sound/asound.h | 4
-rw-r--r--  net/bridge/br_netlink.c | 1
-rw-r--r--  net/core/rtnetlink.c | 23
-rw-r--r--  net/ipv4/af_inet.c | 11
-rw-r--r--  net/ipv4/ip_vti.c | 1
-rw-r--r--  net/ipv4/ping.c | 14
-rw-r--r--  net/ipv4/tcp.c | 2
-rw-r--r--  net/ipv4/tcp_ipv4.c | 5
-rw-r--r--  net/ipv6/ip6_gre.c | 4
-rw-r--r--  net/ipv6/ip6_offload.c | 3
-rw-r--r--  net/ipv6/ip6_udp_tunnel.c | 4
-rw-r--r--  net/ipv6/ip6_vti.c | 11
-rw-r--r--  net/ipv6/tcp_ipv6.c | 5
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 14
-rw-r--r--  net/packet/af_packet.c | 2
-rw-r--r--  net/sunrpc/svcsock.c | 27
-rw-r--r--  sound/core/pcm.c | 2
-rw-r--r--  sound/core/pcm_misc.c | 8
-rw-r--r--  sound/pci/hda/hda_intel.c | 24
-rw-r--r--  sound/pci/hda/hda_priv.h | 1
-rw-r--r--  sound/pci/hda/patch_realtek.c | 1
-rw-r--r--  sound/usb/mixer_quirks.c | 4
-rw-r--r--  sound/usb/quirks.c | 4
-rw-r--r--  virt/kvm/arm/vgic.c | 8
-rw-r--r--  virt/kvm/kvm_main.c | 16
164 files changed, 1715 insertions(+), 741 deletions(-)
diff --git a/Documentation/cpu-freq/intel-pstate.txt b/Documentation/cpu-freq/intel-pstate.txt
index a69ffe1d54d5..765d7fc0e692 100644
--- a/Documentation/cpu-freq/intel-pstate.txt
+++ b/Documentation/cpu-freq/intel-pstate.txt
@@ -1,17 +1,28 @@
1Intel P-state driver 1Intel P-state driver
2-------------------- 2--------------------
3 3
4This driver implements a scaling driver with an internal governor for 4This driver provides an interface to control the P state selection for
5Intel Core processors. The driver follows the same model as the 5SandyBridge+ Intel processors. The driver can operate two different
6Transmeta scaling driver (longrun.c) and implements the setpolicy() 6modes based on the processor model legacy and Hardware P state (HWP)
7instead of target(). Scaling drivers that implement setpolicy() are 7mode.
8assumed to implement internal governors by the cpufreq core. All the 8
9logic for selecting the current P state is contained within the 9In legacy mode the driver implements a scaling driver with an internal
10driver; no external governor is used by the cpufreq core. 10governor for Intel Core processors. The driver follows the same model
11 11as the Transmeta scaling driver (longrun.c) and implements the
12Intel SandyBridge+ processors are supported. 12setpolicy() instead of target(). Scaling drivers that implement
13 13setpolicy() are assumed to implement internal governors by the cpufreq
14New sysfs files for controlling P state selection have been added to 14core. All the logic for selecting the current P state is contained
15within the driver; no external governor is used by the cpufreq core.
16
17In HWP mode P state selection is implemented in the processor
18itself. The driver provides the interfaces between the cpufreq core and
19the processor to control P state selection based on user preferences
20and reporting frequency to the cpufreq core. In this mode the
21internal governor code is disabled.
22
23In addtion to the interfaces provided by the cpufreq core for
24controlling frequency the driver provides sysfs files for
25controlling P state selection. These files have been added to
15/sys/devices/system/cpu/intel_pstate/ 26/sys/devices/system/cpu/intel_pstate/
16 27
17 max_perf_pct: limits the maximum P state that will be requested by 28 max_perf_pct: limits the maximum P state that will be requested by
@@ -33,7 +44,9 @@ frequency is fiction for Intel Core processors. Even if the scaling
33driver selects a single P state the actual frequency the processor 44driver selects a single P state the actual frequency the processor
34will run at is selected by the processor itself. 45will run at is selected by the processor itself.
35 46
36New debugfs files have also been added to /sys/kernel/debug/pstate_snb/ 47For legacy mode debugfs files have also been added to allow tuning of
48the internal governor algorythm. These files are located at
49/sys/kernel/debug/pstate_snb/ These files are NOT present in HWP mode.
37 50
38 deadband 51 deadband
39 d_gain_pct 52 d_gain_pct
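A minimal user-space sketch of the sysfs interface documented in the hunk above (not part of the patch): it reads max_perf_pct, the only knob whose description is quoted here, and then caps requested performance at 80%. Root privileges and a system running intel_pstate are assumed.

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/devices/system/cpu/intel_pstate/max_perf_pct";
	FILE *f;
	int pct;

	f = fopen(path, "r");			/* read the current limit */
	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%d", &pct) == 1)
		printf("max_perf_pct is currently %d\n", pct);
	fclose(f);

	f = fopen(path, "w");			/* writing needs root */
	if (!f) {
		perror(path);
		return 1;
	}
	fprintf(f, "80\n");			/* cap P-state requests at 80% of maximum */
	fclose(f);
	return 0;
}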
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 479f33204a37..d006bb22f3ca 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1446,6 +1446,18 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
1446 disable 1446 disable
1447 Do not enable intel_pstate as the default 1447 Do not enable intel_pstate as the default
1448 scaling driver for the supported processors 1448 scaling driver for the supported processors
1449 force
1450 Enable intel_pstate on systems that prohibit it by default
1451 in favor of acpi-cpufreq. Forcing the intel_pstate driver
1452 instead of acpi-cpufreq may disable platform features, such
1453 as thermal controls and power capping, that rely on ACPI
1454 P-States information being indicated to OSPM and therefore
1455 should be used with caution. This option does not work with
1456 processors that aren't supported by the intel_pstate driver
1457 or on platforms that use pcc-cpufreq instead of acpi-cpufreq.
1458 no_hwp
1459 Do not enable hardware P state control (HWP)
1460 if available.
1449 1461
1450 intremap= [X86-64, Intel-IOMMU] 1462 intremap= [X86-64, Intel-IOMMU]
1451 on enable Interrupt Remapping (default) 1463 on enable Interrupt Remapping (default)
diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt
index 412f45ca2d73..1d6d02d6ba52 100644
--- a/Documentation/networking/timestamping.txt
+++ b/Documentation/networking/timestamping.txt
@@ -136,7 +136,7 @@ SOF_TIMESTAMPING_OPT_ID:
136 136
137 This option is implemented only for transmit timestamps. There, the 137 This option is implemented only for transmit timestamps. There, the
138 timestamp is always looped along with a struct sock_extended_err. 138 timestamp is always looped along with a struct sock_extended_err.
139 The option modifies field ee_info to pass an id that is unique 139 The option modifies field ee_data to pass an id that is unique
140 among all possibly concurrently outstanding timestamp requests for 140 among all possibly concurrently outstanding timestamp requests for
141 that socket. In practice, it is a monotonically increasing u32 141 that socket. In practice, it is a monotonically increasing u32
142 (that wraps). 142 (that wraps).
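The ee_data correction above is easier to see from the receive side. The sketch below is illustrative only (not part of the patch): it assumes an IPv4 socket that already has SO_TIMESTAMPING with SOF_TIMESTAMPING_OPT_ID enabled, and uses the standard Linux error-queue plumbing (MSG_ERRQUEUE, IP_RECVERR, struct sock_extended_err) to pull the per-request id out of ee_data.

#include <linux/errqueue.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static void read_tx_timestamp_id(int fd)
{
	char data[256], control[512];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = control, .msg_controllen = sizeof(control),
	};
	struct cmsghdr *cm;

	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)	/* drain one error-queue entry */
		return;

	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level == SOL_IP && cm->cmsg_type == IP_RECVERR) {
			struct sock_extended_err serr;

			memcpy(&serr, CMSG_DATA(cm), sizeof(serr));
			if (serr.ee_origin == SO_EE_ORIGIN_TIMESTAMPING)
				printf("timestamp id %u\n", serr.ee_data);
		}
	}
}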
diff --git a/MAINTAINERS b/MAINTAINERS
index 0ff630de8a6d..888d8bd8b5b7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4837,6 +4837,12 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux.git
4837S: Supported 4837S: Supported
4838F: drivers/idle/intel_idle.c 4838F: drivers/idle/intel_idle.c
4839 4839
4840INTEL PSTATE DRIVER
4841M: Kristen Carlson Accardi <kristen@linux.intel.com>
4842L: linux-pm@vger.kernel.org
4843S: Supported
4844F: drivers/cpufreq/intel_pstate.c
4845
4840INTEL FRAMEBUFFER DRIVER (excluding 810 and 815) 4846INTEL FRAMEBUFFER DRIVER (excluding 810 and 815)
4841M: Maik Broemme <mbroemme@plusserver.de> 4847M: Maik Broemme <mbroemme@plusserver.de>
4842L: linux-fbdev@vger.kernel.org 4848L: linux-fbdev@vger.kernel.org
diff --git a/Makefile b/Makefile
index 2fd5c4e5c139..ce70361f766e 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 18 2PATCHLEVEL = 18
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc6 4EXTRAVERSION = -rc7
5NAME = Diseased Newt 5NAME = Diseased Newt
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/exynos5250-snow.dts b/arch/arm/boot/dts/exynos5250-snow.dts
index e51fcef884a4..60429ad1c5d8 100644
--- a/arch/arm/boot/dts/exynos5250-snow.dts
+++ b/arch/arm/boot/dts/exynos5250-snow.dts
@@ -624,4 +624,8 @@
624 num-cs = <1>; 624 num-cs = <1>;
625}; 625};
626 626
627&usbdrd_dwc3 {
628 dr_mode = "host";
629};
630
627#include "cros-ec-keyboard.dtsi" 631#include "cros-ec-keyboard.dtsi"
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index f21b9aa00fbb..d55c1a2eb798 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -555,7 +555,7 @@
555 #size-cells = <1>; 555 #size-cells = <1>;
556 ranges; 556 ranges;
557 557
558 dwc3 { 558 usbdrd_dwc3: dwc3 {
559 compatible = "synopsys,dwc3"; 559 compatible = "synopsys,dwc3";
560 reg = <0x12000000 0x10000>; 560 reg = <0x12000000 0x10000>;
561 interrupts = <0 72 0>; 561 interrupts = <0 72 0>;
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index 72058b8a6f4d..e21ef830a483 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -142,11 +142,13 @@ CONFIG_MMC_DW_IDMAC=y
142CONFIG_MMC_DW_EXYNOS=y 142CONFIG_MMC_DW_EXYNOS=y
143CONFIG_RTC_CLASS=y 143CONFIG_RTC_CLASS=y
144CONFIG_RTC_DRV_MAX77686=y 144CONFIG_RTC_DRV_MAX77686=y
145CONFIG_RTC_DRV_MAX77802=y
145CONFIG_RTC_DRV_S5M=y 146CONFIG_RTC_DRV_S5M=y
146CONFIG_RTC_DRV_S3C=y 147CONFIG_RTC_DRV_S3C=y
147CONFIG_DMADEVICES=y 148CONFIG_DMADEVICES=y
148CONFIG_PL330_DMA=y 149CONFIG_PL330_DMA=y
149CONFIG_COMMON_CLK_MAX77686=y 150CONFIG_COMMON_CLK_MAX77686=y
151CONFIG_COMMON_CLK_MAX77802=y
150CONFIG_COMMON_CLK_S2MPS11=y 152CONFIG_COMMON_CLK_S2MPS11=y
151CONFIG_EXYNOS_IOMMU=y 153CONFIG_EXYNOS_IOMMU=y
152CONFIG_IIO=y 154CONFIG_IIO=y
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index fc44d3761f9e..ce73ab635414 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -44,16 +44,6 @@ struct cpu_context_save {
44 __u32 extra[2]; /* Xscale 'acc' register, etc */ 44 __u32 extra[2]; /* Xscale 'acc' register, etc */
45}; 45};
46 46
47struct arm_restart_block {
48 union {
49 /* For user cache flushing */
50 struct {
51 unsigned long start;
52 unsigned long end;
53 } cache;
54 };
55};
56
57/* 47/*
58 * low level task data that entry.S needs immediate access to. 48 * low level task data that entry.S needs immediate access to.
59 * __switch_to() assumes cpu_context follows immediately after cpu_domain. 49 * __switch_to() assumes cpu_context follows immediately after cpu_domain.
@@ -79,7 +69,6 @@ struct thread_info {
79 unsigned long thumbee_state; /* ThumbEE Handler Base register */ 69 unsigned long thumbee_state; /* ThumbEE Handler Base register */
80#endif 70#endif
81 struct restart_block restart_block; 71 struct restart_block restart_block;
82 struct arm_restart_block arm_restart_block;
83}; 72};
84 73
85#define INIT_THREAD_INFO(tsk) \ 74#define INIT_THREAD_INFO(tsk) \
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 0c8b10801d36..9f5d81881eb6 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -533,8 +533,6 @@ static int bad_syscall(int n, struct pt_regs *regs)
533 return regs->ARM_r0; 533 return regs->ARM_r0;
534} 534}
535 535
536static long do_cache_op_restart(struct restart_block *);
537
538static inline int 536static inline int
539__do_cache_op(unsigned long start, unsigned long end) 537__do_cache_op(unsigned long start, unsigned long end)
540{ 538{
@@ -543,24 +541,8 @@ __do_cache_op(unsigned long start, unsigned long end)
543 do { 541 do {
544 unsigned long chunk = min(PAGE_SIZE, end - start); 542 unsigned long chunk = min(PAGE_SIZE, end - start);
545 543
546 if (signal_pending(current)) { 544 if (fatal_signal_pending(current))
547 struct thread_info *ti = current_thread_info(); 545 return 0;
548
549 ti->restart_block = (struct restart_block) {
550 .fn = do_cache_op_restart,
551 };
552
553 ti->arm_restart_block = (struct arm_restart_block) {
554 {
555 .cache = {
556 .start = start,
557 .end = end,
558 },
559 },
560 };
561
562 return -ERESTART_RESTARTBLOCK;
563 }
564 546
565 ret = flush_cache_user_range(start, start + chunk); 547 ret = flush_cache_user_range(start, start + chunk);
566 if (ret) 548 if (ret)
@@ -573,15 +555,6 @@ __do_cache_op(unsigned long start, unsigned long end)
573 return 0; 555 return 0;
574} 556}
575 557
576static long do_cache_op_restart(struct restart_block *unused)
577{
578 struct arm_restart_block *restart_block;
579
580 restart_block = &current_thread_info()->arm_restart_block;
581 return __do_cache_op(restart_block->cache.start,
582 restart_block->cache.end);
583}
584
585static inline int 558static inline int
586do_cache_op(unsigned long start, unsigned long end, int flags) 559do_cache_op(unsigned long start, unsigned long end, int flags)
587{ 560{
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 57a403a5c22b..8664ff17cbbe 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -197,7 +197,8 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
197 pgd = pgdp + pgd_index(addr); 197 pgd = pgdp + pgd_index(addr);
198 do { 198 do {
199 next = kvm_pgd_addr_end(addr, end); 199 next = kvm_pgd_addr_end(addr, end);
200 unmap_puds(kvm, pgd, addr, next); 200 if (!pgd_none(*pgd))
201 unmap_puds(kvm, pgd, addr, next);
201 } while (pgd++, addr = next, addr != end); 202 } while (pgd++, addr = next, addr != end);
202} 203}
203 204
@@ -834,6 +835,11 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
834 return kvm_vcpu_dabt_iswrite(vcpu); 835 return kvm_vcpu_dabt_iswrite(vcpu);
835} 836}
836 837
838static bool kvm_is_device_pfn(unsigned long pfn)
839{
840 return !pfn_valid(pfn);
841}
842
837static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, 843static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
838 struct kvm_memory_slot *memslot, unsigned long hva, 844 struct kvm_memory_slot *memslot, unsigned long hva,
839 unsigned long fault_status) 845 unsigned long fault_status)
@@ -904,7 +910,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
904 if (is_error_pfn(pfn)) 910 if (is_error_pfn(pfn))
905 return -EFAULT; 911 return -EFAULT;
906 912
907 if (kvm_is_mmio_pfn(pfn)) 913 if (kvm_is_device_pfn(pfn))
908 mem_type = PAGE_S2_DEVICE; 914 mem_type = PAGE_S2_DEVICE;
909 915
910 spin_lock(&kvm->mmu_lock); 916 spin_lock(&kvm->mmu_lock);
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index 2bdc3233abe2..044b51185fcc 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -400,6 +400,8 @@ int __init coherency_init(void)
400 type == COHERENCY_FABRIC_TYPE_ARMADA_380) 400 type == COHERENCY_FABRIC_TYPE_ARMADA_380)
401 armada_375_380_coherency_init(np); 401 armada_375_380_coherency_init(np);
402 402
403 of_node_put(np);
404
403 return 0; 405 return 0;
404} 406}
405 407
diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c
index da7be13aecce..ab95f5391a2b 100644
--- a/arch/arm/mach-tegra/irq.c
+++ b/arch/arm/mach-tegra/irq.c
@@ -99,42 +99,42 @@ static inline void tegra_irq_write_mask(unsigned int irq, unsigned long reg)
99 99
100static void tegra_mask(struct irq_data *d) 100static void tegra_mask(struct irq_data *d)
101{ 101{
102 if (d->irq < FIRST_LEGACY_IRQ) 102 if (d->hwirq < FIRST_LEGACY_IRQ)
103 return; 103 return;
104 104
105 tegra_irq_write_mask(d->irq, ICTLR_CPU_IER_CLR); 105 tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IER_CLR);
106} 106}
107 107
108static void tegra_unmask(struct irq_data *d) 108static void tegra_unmask(struct irq_data *d)
109{ 109{
110 if (d->irq < FIRST_LEGACY_IRQ) 110 if (d->hwirq < FIRST_LEGACY_IRQ)
111 return; 111 return;
112 112
113 tegra_irq_write_mask(d->irq, ICTLR_CPU_IER_SET); 113 tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IER_SET);
114} 114}
115 115
116static void tegra_ack(struct irq_data *d) 116static void tegra_ack(struct irq_data *d)
117{ 117{
118 if (d->irq < FIRST_LEGACY_IRQ) 118 if (d->hwirq < FIRST_LEGACY_IRQ)
119 return; 119 return;
120 120
121 tegra_irq_write_mask(d->irq, ICTLR_CPU_IEP_FIR_CLR); 121 tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IEP_FIR_CLR);
122} 122}
123 123
124static void tegra_eoi(struct irq_data *d) 124static void tegra_eoi(struct irq_data *d)
125{ 125{
126 if (d->irq < FIRST_LEGACY_IRQ) 126 if (d->hwirq < FIRST_LEGACY_IRQ)
127 return; 127 return;
128 128
129 tegra_irq_write_mask(d->irq, ICTLR_CPU_IEP_FIR_CLR); 129 tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IEP_FIR_CLR);
130} 130}
131 131
132static int tegra_retrigger(struct irq_data *d) 132static int tegra_retrigger(struct irq_data *d)
133{ 133{
134 if (d->irq < FIRST_LEGACY_IRQ) 134 if (d->hwirq < FIRST_LEGACY_IRQ)
135 return 0; 135 return 0;
136 136
137 tegra_irq_write_mask(d->irq, ICTLR_CPU_IEP_FIR_SET); 137 tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IEP_FIR_SET);
138 138
139 return 1; 139 return 1;
140} 140}
@@ -142,7 +142,7 @@ static int tegra_retrigger(struct irq_data *d)
142#ifdef CONFIG_PM_SLEEP 142#ifdef CONFIG_PM_SLEEP
143static int tegra_set_wake(struct irq_data *d, unsigned int enable) 143static int tegra_set_wake(struct irq_data *d, unsigned int enable)
144{ 144{
145 u32 irq = d->irq; 145 u32 irq = d->hwirq;
146 u32 index, mask; 146 u32 index, mask;
147 147
148 if (irq < FIRST_LEGACY_IRQ || 148 if (irq < FIRST_LEGACY_IRQ ||
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index b3a947863ac7..22ac2a6fbfe3 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -270,7 +270,6 @@ __v7_pj4b_setup:
270/* Auxiliary Debug Modes Control 1 Register */ 270/* Auxiliary Debug Modes Control 1 Register */
271#define PJ4B_STATIC_BP (1 << 2) /* Enable Static BP */ 271#define PJ4B_STATIC_BP (1 << 2) /* Enable Static BP */
272#define PJ4B_INTER_PARITY (1 << 8) /* Disable Internal Parity Handling */ 272#define PJ4B_INTER_PARITY (1 << 8) /* Disable Internal Parity Handling */
273#define PJ4B_BCK_OFF_STREX (1 << 5) /* Enable the back off of STREX instr */
274#define PJ4B_CLEAN_LINE (1 << 16) /* Disable data transfer for clean line */ 273#define PJ4B_CLEAN_LINE (1 << 16) /* Disable data transfer for clean line */
275 274
276/* Auxiliary Debug Modes Control 2 Register */ 275/* Auxiliary Debug Modes Control 2 Register */
@@ -293,7 +292,6 @@ __v7_pj4b_setup:
293 /* Auxiliary Debug Modes Control 1 Register */ 292 /* Auxiliary Debug Modes Control 1 Register */
294 mrc p15, 1, r0, c15, c1, 1 293 mrc p15, 1, r0, c15, c1, 1
295 orr r0, r0, #PJ4B_CLEAN_LINE 294 orr r0, r0, #PJ4B_CLEAN_LINE
296 orr r0, r0, #PJ4B_BCK_OFF_STREX
297 orr r0, r0, #PJ4B_INTER_PARITY 295 orr r0, r0, #PJ4B_INTER_PARITY
298 bic r0, r0, #PJ4B_STATIC_BP 296 bic r0, r0, #PJ4B_STATIC_BP
299 mcr p15, 1, r0, c15, c1, 1 297 mcr p15, 1, r0, c15, c1, 1
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 23259f104c66..afa2b3c4df4a 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -535,7 +535,7 @@ ENTRY(cpu_xscale_do_suspend)
535 mrc p15, 0, r5, c15, c1, 0 @ CP access reg 535 mrc p15, 0, r5, c15, c1, 0 @ CP access reg
536 mrc p15, 0, r6, c13, c0, 0 @ PID 536 mrc p15, 0, r6, c13, c0, 0 @ PID
537 mrc p15, 0, r7, c3, c0, 0 @ domain ID 537 mrc p15, 0, r7, c3, c0, 0 @ domain ID
538 mrc p15, 0, r8, c1, c1, 0 @ auxiliary control reg 538 mrc p15, 0, r8, c1, c0, 1 @ auxiliary control reg
539 mrc p15, 0, r9, c1, c0, 0 @ control reg 539 mrc p15, 0, r9, c1, c0, 0 @ control reg
540 bic r4, r4, #2 @ clear frequency change bit 540 bic r4, r4, #2 @ clear frequency change bit
541 stmia r0, {r4 - r9} @ store cp regs 541 stmia r0, {r4 - r9} @ store cp regs
@@ -552,7 +552,7 @@ ENTRY(cpu_xscale_do_resume)
552 mcr p15, 0, r6, c13, c0, 0 @ PID 552 mcr p15, 0, r6, c13, c0, 0 @ PID
553 mcr p15, 0, r7, c3, c0, 0 @ domain ID 553 mcr p15, 0, r7, c3, c0, 0 @ domain ID
554 mcr p15, 0, r1, c2, c0, 0 @ translation table base addr 554 mcr p15, 0, r1, c2, c0, 0 @ translation table base addr
555 mcr p15, 0, r8, c1, c1, 0 @ auxiliary control reg 555 mcr p15, 0, r8, c1, c0, 1 @ auxiliary control reg
556 mov r0, r9 @ control register 556 mov r0, r9 @ control register
557 b cpu_resume_mmu 557 b cpu_resume_mmu
558ENDPROC(cpu_xscale_do_resume) 558ENDPROC(cpu_xscale_do_resume)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 4cc3b719208e..3d7c2df89946 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -424,6 +424,11 @@ static const struct sys_reg_desc sys_reg_descs[] = {
424 /* VBAR_EL1 */ 424 /* VBAR_EL1 */
425 { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000), 425 { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
426 NULL, reset_val, VBAR_EL1, 0 }, 426 NULL, reset_val, VBAR_EL1, 0 },
427
428 /* ICC_SRE_EL1 */
429 { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
430 trap_raz_wi },
431
427 /* CONTEXTIDR_EL1 */ 432 /* CONTEXTIDR_EL1 */
428 { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001), 433 { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
429 access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 }, 434 access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
@@ -690,6 +695,10 @@ static const struct sys_reg_desc cp15_regs[] = {
690 { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR }, 695 { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
691 { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 }, 696 { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
692 { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 }, 697 { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
698
699 /* ICC_SRE */
700 { Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi },
701
693 { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID }, 702 { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
694}; 703};
695 704
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index ec6b9acb6bea..dbe46f43884d 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1563,7 +1563,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
1563 1563
1564 for (i = 0; i < npages; i++) { 1564 for (i = 0; i < npages; i++) {
1565 pfn = gfn_to_pfn(kvm, base_gfn + i); 1565 pfn = gfn_to_pfn(kvm, base_gfn + i);
1566 if (!kvm_is_mmio_pfn(pfn)) { 1566 if (!kvm_is_reserved_pfn(pfn)) {
1567 kvm_set_pmt_entry(kvm, base_gfn + i, 1567 kvm_set_pmt_entry(kvm, base_gfn + i,
1568 pfn << PAGE_SHIFT, 1568 pfn << PAGE_SHIFT,
1569 _PAGE_AR_RWX | _PAGE_MA_WB); 1569 _PAGE_AR_RWX | _PAGE_MA_WB);
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index f43aa536c517..9536ef912f59 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2101,9 +2101,17 @@ config 64BIT_PHYS_ADDR
2101config ARCH_PHYS_ADDR_T_64BIT 2101config ARCH_PHYS_ADDR_T_64BIT
2102 def_bool 64BIT_PHYS_ADDR 2102 def_bool 64BIT_PHYS_ADDR
2103 2103
2104choice
2105 prompt "SmartMIPS or microMIPS ASE support"
2106
2107config CPU_NEEDS_NO_SMARTMIPS_OR_MICROMIPS
2108 bool "None"
2109 help
2110 Select this if you want neither microMIPS nor SmartMIPS support
2111
2104config CPU_HAS_SMARTMIPS 2112config CPU_HAS_SMARTMIPS
2105 depends on SYS_SUPPORTS_SMARTMIPS 2113 depends on SYS_SUPPORTS_SMARTMIPS
2106 bool "Support for the SmartMIPS ASE" 2114 bool "SmartMIPS"
2107 help 2115 help
2108 SmartMIPS is a extension of the MIPS32 architecture aimed at 2116 SmartMIPS is a extension of the MIPS32 architecture aimed at
2109 increased security at both hardware and software level for 2117 increased security at both hardware and software level for
@@ -2115,11 +2123,13 @@ config CPU_HAS_SMARTMIPS
2115 2123
2116config CPU_MICROMIPS 2124config CPU_MICROMIPS
2117 depends on SYS_SUPPORTS_MICROMIPS 2125 depends on SYS_SUPPORTS_MICROMIPS
2118 bool "Build kernel using microMIPS ISA" 2126 bool "microMIPS"
2119 help 2127 help
2120 When this option is enabled the kernel will be built using the 2128 When this option is enabled the kernel will be built using the
2121 microMIPS ISA 2129 microMIPS ISA
2122 2130
2131endchoice
2132
2123config CPU_HAS_MSA 2133config CPU_HAS_MSA
2124 bool "Support for the MIPS SIMD Architecture (EXPERIMENTAL)" 2134 bool "Support for the MIPS SIMD Architecture (EXPERIMENTAL)"
2125 depends on CPU_SUPPORTS_MSA 2135 depends on CPU_SUPPORTS_MSA
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index b46cd220a018..22a135ac91de 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -661,6 +661,8 @@
661#define MIPS_CONF6_SYND (_ULCAST_(1) << 13) 661#define MIPS_CONF6_SYND (_ULCAST_(1) << 13)
662/* proAptiv FTLB on/off bit */ 662/* proAptiv FTLB on/off bit */
663#define MIPS_CONF6_FTLBEN (_ULCAST_(1) << 15) 663#define MIPS_CONF6_FTLBEN (_ULCAST_(1) << 15)
664/* FTLB probability bits */
665#define MIPS_CONF6_FTLBP_SHIFT (16)
664 666
665#define MIPS_CONF7_WII (_ULCAST_(1) << 31) 667#define MIPS_CONF7_WII (_ULCAST_(1) << 31)
666 668
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index 4520adc8699b..cd6e0afc6833 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -257,7 +257,11 @@ static inline void protected_flush_icache_line(unsigned long addr)
257 */ 257 */
258static inline void protected_writeback_dcache_line(unsigned long addr) 258static inline void protected_writeback_dcache_line(unsigned long addr)
259{ 259{
260#ifdef CONFIG_EVA
261 protected_cachee_op(Hit_Writeback_Inv_D, addr);
262#else
260 protected_cache_op(Hit_Writeback_Inv_D, addr); 263 protected_cache_op(Hit_Writeback_Inv_D, addr);
264#endif
261} 265}
262 266
263static inline void protected_writeback_scache_line(unsigned long addr) 267static inline void protected_writeback_scache_line(unsigned long addr)
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index 133678ab4eb8..22a5624e2fd2 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -1422,7 +1422,7 @@ static inline long __strnlen_user(const char __user *s, long n)
1422} 1422}
1423 1423
1424/* 1424/*
1425 * strlen_user: - Get the size of a string in user space. 1425 * strnlen_user: - Get the size of a string in user space.
1426 * @str: The string to measure. 1426 * @str: The string to measure.
1427 * 1427 *
1428 * Context: User context only. This function may sleep. 1428 * Context: User context only. This function may sleep.
@@ -1431,9 +1431,7 @@ static inline long __strnlen_user(const char __user *s, long n)
1431 * 1431 *
1432 * Returns the size of the string INCLUDING the terminating NUL. 1432 * Returns the size of the string INCLUDING the terminating NUL.
1433 * On exception, returns 0. 1433 * On exception, returns 0.
1434 * 1434 * If the string is too long, returns a value greater than @n.
1435 * If there is a limit on the length of a valid string, you may wish to
1436 * consider using strnlen_user() instead.
1437 */ 1435 */
1438static inline long strnlen_user(const char __user *s, long n) 1436static inline long strnlen_user(const char __user *s, long n)
1439{ 1437{
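A hedged illustration of the return-value contract restated in the comment above (the result counts the terminating NUL, and a result larger than the limit means no NUL was found within it). The helper and its names are hypothetical, not taken from the patch.

#include <linux/errno.h>
#include <linux/uaccess.h>

/* Hypothetical caller: copy a user string only if it fits in buf. */
static int copy_name_from_user(char *buf, size_t bufsize,
			       const char __user *ustr)
{
	long len = strnlen_user(ustr, bufsize);

	if (len == 0)				/* faulted while probing */
		return -EFAULT;
	if (len > bufsize)			/* no NUL within bufsize bytes */
		return -ENAMETOOLONG;
	if (copy_from_user(buf, ustr, len))
		return -EFAULT;
	return 0;				/* buf now holds the string + NUL */
}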
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index 9dc58568f230..d001bb1ad177 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -1045,7 +1045,7 @@
1045#define __NR_seccomp (__NR_Linux + 316) 1045#define __NR_seccomp (__NR_Linux + 316)
1046#define __NR_getrandom (__NR_Linux + 317) 1046#define __NR_getrandom (__NR_Linux + 317)
1047#define __NR_memfd_create (__NR_Linux + 318) 1047#define __NR_memfd_create (__NR_Linux + 318)
1048#define __NR_memfd_create (__NR_Linux + 319) 1048#define __NR_bpf (__NR_Linux + 319)
1049 1049
1050/* 1050/*
1051 * Offset of the last N32 flavoured syscall 1051 * Offset of the last N32 flavoured syscall
diff --git a/arch/mips/kernel/bmips_vec.S b/arch/mips/kernel/bmips_vec.S
index 290c23b51678..86495072a922 100644
--- a/arch/mips/kernel/bmips_vec.S
+++ b/arch/mips/kernel/bmips_vec.S
@@ -208,7 +208,6 @@ bmips_reset_nmi_vec_end:
208END(bmips_reset_nmi_vec) 208END(bmips_reset_nmi_vec)
209 209
210 .set pop 210 .set pop
211 .previous
212 211
213/*********************************************************************** 212/***********************************************************************
214 * CPU1 warm restart vector (used for second and subsequent boots). 213 * CPU1 warm restart vector (used for second and subsequent boots).
@@ -281,5 +280,3 @@ LEAF(bmips_enable_xks01)
281 jr ra 280 jr ra
282 281
283END(bmips_enable_xks01) 282END(bmips_enable_xks01)
284
285 .previous
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index e6e97d2a5c9e..0384b05ab5a0 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -229,6 +229,7 @@ LEAF(mips_cps_core_init)
229 nop 229 nop
230 230
231 .set push 231 .set push
232 .set mips32r2
232 .set mt 233 .set mt
233 234
234 /* Only allow 1 TC per VPE to execute... */ 235 /* Only allow 1 TC per VPE to execute... */
@@ -345,6 +346,7 @@ LEAF(mips_cps_boot_vpes)
345 nop 346 nop
346 347
347 .set push 348 .set push
349 .set mips32r2
348 .set mt 350 .set mt
349 351
3501: /* Enter VPE configuration state */ 3521: /* Enter VPE configuration state */
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index d5a4f380b019..dc49cf30c2db 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -193,6 +193,32 @@ static void set_isa(struct cpuinfo_mips *c, unsigned int isa)
193static char unknown_isa[] = KERN_ERR \ 193static char unknown_isa[] = KERN_ERR \
194 "Unsupported ISA type, c0.config0: %d."; 194 "Unsupported ISA type, c0.config0: %d.";
195 195
196static unsigned int calculate_ftlb_probability(struct cpuinfo_mips *c)
197{
198
199 unsigned int probability = c->tlbsize / c->tlbsizevtlb;
200
201 /*
202 * 0 = All TLBWR instructions go to FTLB
203 * 1 = 15:1: For every 16 TBLWR instructions, 15 go to the
204 * FTLB and 1 goes to the VTLB.
205 * 2 = 7:1: As above with 7:1 ratio.
206 * 3 = 3:1: As above with 3:1 ratio.
207 *
208 * Use the linear midpoint as the probability threshold.
209 */
210 if (probability >= 12)
211 return 1;
212 else if (probability >= 6)
213 return 2;
214 else
215 /*
216 * So FTLB is less than 4 times bigger than VTLB.
217 * A 3:1 ratio can still be useful though.
218 */
219 return 3;
220}
221
196static void set_ftlb_enable(struct cpuinfo_mips *c, int enable) 222static void set_ftlb_enable(struct cpuinfo_mips *c, int enable)
197{ 223{
198 unsigned int config6; 224 unsigned int config6;
@@ -203,9 +229,14 @@ static void set_ftlb_enable(struct cpuinfo_mips *c, int enable)
203 case CPU_P5600: 229 case CPU_P5600:
204 /* proAptiv & related cores use Config6 to enable the FTLB */ 230 /* proAptiv & related cores use Config6 to enable the FTLB */
205 config6 = read_c0_config6(); 231 config6 = read_c0_config6();
232 /* Clear the old probability value */
233 config6 &= ~(3 << MIPS_CONF6_FTLBP_SHIFT);
206 if (enable) 234 if (enable)
207 /* Enable FTLB */ 235 /* Enable FTLB */
208 write_c0_config6(config6 | MIPS_CONF6_FTLBEN); 236 write_c0_config6(config6 |
237 (calculate_ftlb_probability(c)
238 << MIPS_CONF6_FTLBP_SHIFT)
239 | MIPS_CONF6_FTLBEN);
209 else 240 else
210 /* Disable FTLB */ 241 /* Disable FTLB */
211 write_c0_config6(config6 & ~MIPS_CONF6_FTLBEN); 242 write_c0_config6(config6 & ~MIPS_CONF6_FTLBEN);
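To make the threshold mapping in calculate_ftlb_probability() concrete, here is a stand-alone rendering of the same arithmetic fed with hypothetical sizes; neither the 64-entry VTLB nor the 512-entry FTLB figure is taken from the patch.

#include <stdio.h>

static unsigned int ftlb_probability(unsigned int tlbsize,
				     unsigned int tlbsizevtlb)
{
	unsigned int ratio = tlbsize / tlbsizevtlb;

	if (ratio >= 12)
		return 1;	/* 15:1 FTLB:VTLB TLBWR split */
	if (ratio >= 6)
		return 2;	/* 7:1 */
	return 3;		/* 3:1 */
}

int main(void)
{
	/* e.g. tlbsize = 64 + 512 = 576, so 576 / 64 = 9 selects 2 (7:1) */
	printf("FTLBP = %u\n", ftlb_probability(576, 64));
	return 0;
}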
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index 31b1b763cb29..c5c4fd54d797 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -94,12 +94,12 @@ int rtlx_open(int index, int can_sleep)
94 int ret = 0; 94 int ret = 0;
95 95
96 if (index >= RTLX_CHANNELS) { 96 if (index >= RTLX_CHANNELS) {
97 pr_debug(KERN_DEBUG "rtlx_open index out of range\n"); 97 pr_debug("rtlx_open index out of range\n");
98 return -ENOSYS; 98 return -ENOSYS;
99 } 99 }
100 100
101 if (atomic_inc_return(&channel_wqs[index].in_open) > 1) { 101 if (atomic_inc_return(&channel_wqs[index].in_open) > 1) {
102 pr_debug(KERN_DEBUG "rtlx_open channel %d already opened\n", index); 102 pr_debug("rtlx_open channel %d already opened\n", index);
103 ret = -EBUSY; 103 ret = -EBUSY;
104 goto out_fail; 104 goto out_fail;
105 } 105 }
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index d21ec57b6e95..f3b635f86c39 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -485,7 +485,7 @@ static void __init bootmem_init(void)
485 * NOTE: historically plat_mem_setup did the entire platform initialization. 485 * NOTE: historically plat_mem_setup did the entire platform initialization.
486 * This was rather impractical because it meant plat_mem_setup had to 486 * This was rather impractical because it meant plat_mem_setup had to
487 * get away without any kind of memory allocator. To keep old code from 487 * get away without any kind of memory allocator. To keep old code from
488 * breaking plat_setup was just renamed to plat_setup and a second platform 488 * breaking plat_setup was just renamed to plat_mem_setup and a second platform
489 * initialization hook for anything else was introduced. 489 * initialization hook for anything else was introduced.
490 */ 490 */
491 491
@@ -493,7 +493,7 @@ static int usermem __initdata;
493 493
494static int __init early_parse_mem(char *p) 494static int __init early_parse_mem(char *p)
495{ 495{
496 unsigned long start, size; 496 phys_t start, size;
497 497
498 /* 498 /*
499 * If a user specifies memory size, we 499 * If a user specifies memory size, we
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 1d57605e4615..16f1e4f2bf3c 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -658,13 +658,13 @@ static int signal_setup(void)
658 save_fp_context = _save_fp_context; 658 save_fp_context = _save_fp_context;
659 restore_fp_context = _restore_fp_context; 659 restore_fp_context = _restore_fp_context;
660 } else { 660 } else {
661 save_fp_context = copy_fp_from_sigcontext; 661 save_fp_context = copy_fp_to_sigcontext;
662 restore_fp_context = copy_fp_to_sigcontext; 662 restore_fp_context = copy_fp_from_sigcontext;
663 } 663 }
664#endif /* CONFIG_SMP */ 664#endif /* CONFIG_SMP */
665#else 665#else
666 save_fp_context = copy_fp_from_sigcontext;; 666 save_fp_context = copy_fp_to_sigcontext;
667 restore_fp_context = copy_fp_to_sigcontext; 667 restore_fp_context = copy_fp_from_sigcontext;
668#endif 668#endif
669 669
670 return 0; 670 return 0;
diff --git a/arch/mips/loongson/common/Makefile b/arch/mips/loongson/common/Makefile
index 0bb9cc9dc621..d87e03330b29 100644
--- a/arch/mips/loongson/common/Makefile
+++ b/arch/mips/loongson/common/Makefile
@@ -11,7 +11,8 @@ obj-$(CONFIG_PCI) += pci.o
11# Serial port support 11# Serial port support
12# 12#
13obj-$(CONFIG_EARLY_PRINTK) += early_printk.o 13obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
14obj-$(CONFIG_SERIAL_8250) += serial.o 14loongson-serial-$(CONFIG_SERIAL_8250) := serial.o
15obj-y += $(loongson-serial-m) $(loongson-serial-y)
15obj-$(CONFIG_LOONGSON_UART_BASE) += uart_base.o 16obj-$(CONFIG_LOONGSON_UART_BASE) += uart_base.o
16obj-$(CONFIG_LOONGSON_MC146818) += rtc.o 17obj-$(CONFIG_LOONGSON_MC146818) += rtc.o
17 18
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index b5f228e7eae6..e3328a96e809 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1872,8 +1872,16 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
1872 uasm_l_smp_pgtable_change(l, *p); 1872 uasm_l_smp_pgtable_change(l, *p);
1873#endif 1873#endif
1874 iPTE_LW(p, wr.r1, wr.r2); /* get even pte */ 1874 iPTE_LW(p, wr.r1, wr.r2); /* get even pte */
1875 if (!m4kc_tlbp_war()) 1875 if (!m4kc_tlbp_war()) {
1876 build_tlb_probe_entry(p); 1876 build_tlb_probe_entry(p);
1877 if (cpu_has_htw) {
1878 /* race condition happens, leaving */
1879 uasm_i_ehb(p);
1880 uasm_i_mfc0(p, wr.r3, C0_INDEX);
1881 uasm_il_bltz(p, r, wr.r3, label_leave);
1882 uasm_i_nop(p);
1883 }
1884 }
1877 return wr; 1885 return wr;
1878} 1886}
1879 1887
diff --git a/arch/mips/mti-sead3/sead3-leds.c b/arch/mips/mti-sead3/sead3-leds.c
index 20102a6d4141..c427c5778186 100644
--- a/arch/mips/mti-sead3/sead3-leds.c
+++ b/arch/mips/mti-sead3/sead3-leds.c
@@ -5,7 +5,7 @@
5 * 5 *
6 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 6 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7 */ 7 */
8#include <linux/module.h> 8#include <linux/init.h>
9#include <linux/leds.h> 9#include <linux/leds.h>
10#include <linux/platform_device.h> 10#include <linux/platform_device.h>
11 11
@@ -76,8 +76,4 @@ static int __init led_init(void)
76 return platform_device_register(&fled_device); 76 return platform_device_register(&fled_device);
77} 77}
78 78
79module_init(led_init); 79device_initcall(led_init);
80
81MODULE_AUTHOR("Chris Dearman <chris@mips.com>");
82MODULE_LICENSE("GPL");
83MODULE_DESCRIPTION("LED probe driver for SEAD-3");
diff --git a/arch/mips/netlogic/xlp/Makefile b/arch/mips/netlogic/xlp/Makefile
index be358a8050c5..6b43af0a34d9 100644
--- a/arch/mips/netlogic/xlp/Makefile
+++ b/arch/mips/netlogic/xlp/Makefile
@@ -1,6 +1,10 @@
1obj-y += setup.o nlm_hal.o cop2-ex.o dt.o 1obj-y += setup.o nlm_hal.o cop2-ex.o dt.o
2obj-$(CONFIG_SMP) += wakeup.o 2obj-$(CONFIG_SMP) += wakeup.o
3obj-$(CONFIG_USB) += usb-init.o 3ifdef CONFIG_USB
4obj-$(CONFIG_USB) += usb-init-xlp2.o 4obj-y += usb-init.o
5obj-$(CONFIG_SATA_AHCI) += ahci-init.o 5obj-y += usb-init-xlp2.o
6obj-$(CONFIG_SATA_AHCI) += ahci-init-xlp2.o 6endif
7ifdef CONFIG_SATA_AHCI
8obj-y += ahci-init.o
9obj-y += ahci-init-xlp2.o
10endif
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 4ca90a39d6d0..725247beebec 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -159,8 +159,6 @@ struct pci_dn {
159 159
160 int pci_ext_config_space; /* for pci devices */ 160 int pci_ext_config_space; /* for pci devices */
161 161
162 bool force_32bit_msi;
163
164 struct pci_dev *pcidev; /* back-pointer to the pci device */ 162 struct pci_dev *pcidev; /* back-pointer to the pci device */
165#ifdef CONFIG_EEH 163#ifdef CONFIG_EEH
166 struct eeh_dev *edev; /* eeh device */ 164 struct eeh_dev *edev; /* eeh device */
diff --git a/arch/powerpc/kernel/eeh_sysfs.c b/arch/powerpc/kernel/eeh_sysfs.c
index f19b1e5cb060..1ceecdda810b 100644
--- a/arch/powerpc/kernel/eeh_sysfs.c
+++ b/arch/powerpc/kernel/eeh_sysfs.c
@@ -65,7 +65,7 @@ static ssize_t eeh_pe_state_show(struct device *dev,
65 return -ENODEV; 65 return -ENODEV;
66 66
67 state = eeh_ops->get_state(edev->pe, NULL); 67 state = eeh_ops->get_state(edev->pe, NULL);
68 return sprintf(buf, "%0x08x %0x08x\n", 68 return sprintf(buf, "0x%08x 0x%08x\n",
69 state, edev->pe->state); 69 state, edev->pe->state);
70} 70}
71 71
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 155013da27e0..b15194e2c5fc 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -266,13 +266,3 @@ int pcibus_to_node(struct pci_bus *bus)
266} 266}
267EXPORT_SYMBOL(pcibus_to_node); 267EXPORT_SYMBOL(pcibus_to_node);
268#endif 268#endif
269
270static void quirk_radeon_32bit_msi(struct pci_dev *dev)
271{
272 struct pci_dn *pdn = pci_get_pdn(dev);
273
274 if (pdn)
275 pdn->force_32bit_msi = true;
276}
277DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x68f2, quirk_radeon_32bit_msi);
278DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0xaa68, quirk_radeon_32bit_msi);
diff --git a/arch/powerpc/kernel/vdso32/getcpu.S b/arch/powerpc/kernel/vdso32/getcpu.S
index 23eb9a9441bd..c62be60c7274 100644
--- a/arch/powerpc/kernel/vdso32/getcpu.S
+++ b/arch/powerpc/kernel/vdso32/getcpu.S
@@ -30,8 +30,8 @@
30V_FUNCTION_BEGIN(__kernel_getcpu) 30V_FUNCTION_BEGIN(__kernel_getcpu)
31 .cfi_startproc 31 .cfi_startproc
32 mfspr r5,SPRN_SPRG_VDSO_READ 32 mfspr r5,SPRN_SPRG_VDSO_READ
33 cmpdi cr0,r3,0 33 cmpwi cr0,r3,0
34 cmpdi cr1,r4,0 34 cmpwi cr1,r4,0
35 clrlwi r6,r5,16 35 clrlwi r6,r5,16
36 rlwinm r7,r5,16,31-15,31-0 36 rlwinm r7,r5,16,31-15,31-0
37 beq cr0,1f 37 beq cr0,1f
diff --git a/arch/powerpc/platforms/powernv/opal-hmi.c b/arch/powerpc/platforms/powernv/opal-hmi.c
index 5e1ed1575aab..b322bfb51343 100644
--- a/arch/powerpc/platforms/powernv/opal-hmi.c
+++ b/arch/powerpc/platforms/powernv/opal-hmi.c
@@ -57,7 +57,7 @@ static void print_hmi_event_info(struct OpalHMIEvent *hmi_evt)
57 }; 57 };
58 58
59 /* Print things out */ 59 /* Print things out */
60 if (hmi_evt->version != OpalHMIEvt_V1) { 60 if (hmi_evt->version < OpalHMIEvt_V1) {
61 pr_err("HMI Interrupt, Unknown event version %d !\n", 61 pr_err("HMI Interrupt, Unknown event version %d !\n",
62 hmi_evt->version); 62 hmi_evt->version);
63 return; 63 return;
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 468a0f23c7f2..3ba435ec3dcd 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -1509,7 +1509,6 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
1509 unsigned int is_64, struct msi_msg *msg) 1509 unsigned int is_64, struct msi_msg *msg)
1510{ 1510{
1511 struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev); 1511 struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
1512 struct pci_dn *pdn = pci_get_pdn(dev);
1513 unsigned int xive_num = hwirq - phb->msi_base; 1512 unsigned int xive_num = hwirq - phb->msi_base;
1514 __be32 data; 1513 __be32 data;
1515 int rc; 1514 int rc;
@@ -1523,7 +1522,7 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
1523 return -ENXIO; 1522 return -ENXIO;
1524 1523
1525 /* Force 32-bit MSI on some broken devices */ 1524 /* Force 32-bit MSI on some broken devices */
1526 if (pdn && pdn->force_32bit_msi) 1525 if (dev->no_64bit_msi)
1527 is_64 = 0; 1526 is_64 = 0;
1528 1527
1529 /* Assign XIVE to PE */ 1528 /* Assign XIVE to PE */
@@ -1997,7 +1996,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
1997 if (is_kdump_kernel()) { 1996 if (is_kdump_kernel()) {
1998 pr_info(" Issue PHB reset ...\n"); 1997 pr_info(" Issue PHB reset ...\n");
1999 ioda_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL); 1998 ioda_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL);
2000 ioda_eeh_phb_reset(hose, OPAL_DEASSERT_RESET); 1999 ioda_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE);
2001 } 2000 }
2002 2001
2003 /* Configure M64 window */ 2002 /* Configure M64 window */
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index b2187d0068b8..4b20f2c6b3b2 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -50,7 +50,6 @@ static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
50{ 50{
51 struct pci_controller *hose = pci_bus_to_host(pdev->bus); 51 struct pci_controller *hose = pci_bus_to_host(pdev->bus);
52 struct pnv_phb *phb = hose->private_data; 52 struct pnv_phb *phb = hose->private_data;
53 struct pci_dn *pdn = pci_get_pdn(pdev);
54 struct msi_desc *entry; 53 struct msi_desc *entry;
55 struct msi_msg msg; 54 struct msi_msg msg;
56 int hwirq; 55 int hwirq;
@@ -60,7 +59,7 @@ static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
60 if (WARN_ON(!phb) || !phb->msi_bmp.bitmap) 59 if (WARN_ON(!phb) || !phb->msi_bmp.bitmap)
61 return -ENODEV; 60 return -ENODEV;
62 61
63 if (pdn && pdn->force_32bit_msi && !phb->msi32_support) 62 if (pdev->no_64bit_msi && !phb->msi32_support)
64 return -ENODEV; 63 return -ENODEV;
65 64
66 list_for_each_entry(entry, &pdev->msi_list, list) { 65 list_for_each_entry(entry, &pdev->msi_list, list) {
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index 8ab5add4ac82..8b909e94fd9a 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -420,7 +420,7 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
420 */ 420 */
421again: 421again:
422 if (type == PCI_CAP_ID_MSI) { 422 if (type == PCI_CAP_ID_MSI) {
423 if (pdn->force_32bit_msi) { 423 if (pdev->no_64bit_msi) {
424 rc = rtas_change_msi(pdn, RTAS_CHANGE_32MSI_FN, nvec); 424 rc = rtas_change_msi(pdn, RTAS_CHANGE_32MSI_FN, nvec);
425 if (rc < 0) { 425 if (rc < 0) {
426 /* 426 /*
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index b988b5addf86..c8efbb37d6e0 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -293,10 +293,10 @@ static inline void disable_surveillance(void)
293 args.token = rtas_token("set-indicator"); 293 args.token = rtas_token("set-indicator");
294 if (args.token == RTAS_UNKNOWN_SERVICE) 294 if (args.token == RTAS_UNKNOWN_SERVICE)
295 return; 295 return;
296 args.nargs = 3; 296 args.nargs = cpu_to_be32(3);
297 args.nret = 1; 297 args.nret = cpu_to_be32(1);
298 args.rets = &args.args[3]; 298 args.rets = &args.args[3];
299 args.args[0] = SURVEILLANCE_TOKEN; 299 args.args[0] = cpu_to_be32(SURVEILLANCE_TOKEN);
300 args.args[1] = 0; 300 args.args[1] = 0;
301 args.args[2] = 0; 301 args.args[2] = 0;
302 enter_rtas(__pa(&args)); 302 enter_rtas(__pa(&args));
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 5b1b52a04ad6..7e064c68c5ec 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -12,6 +12,14 @@ int dma_supported(struct device *dev, u64 mask);
12#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) 12#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
13#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) 13#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
14 14
15static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
16 enum dma_data_direction dir)
17{
18 /* Since dma_{alloc,free}_noncoherent() allocated coherent memory, this
19 * routine can be a nop.
20 */
21}
22
15extern struct dma_map_ops *dma_ops; 23extern struct dma_map_ops *dma_ops;
16extern struct dma_map_ops *leon_dma_ops; 24extern struct dma_map_ops *leon_dma_ops;
17extern struct dma_map_ops pci32_dma_ops; 25extern struct dma_map_ops pci32_dma_ops;
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 0bb1335313b2..aede2c347bde 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -189,6 +189,11 @@
189#define X86_FEATURE_DTHERM ( 7*32+ 7) /* Digital Thermal Sensor */ 189#define X86_FEATURE_DTHERM ( 7*32+ 7) /* Digital Thermal Sensor */
190#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ 190#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
191#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ 191#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
192#define X86_FEATURE_HWP ( 7*32+ 10) /* "hwp" Intel HWP */
193#define X86_FEATURE_HWP_NOITFY ( 7*32+ 11) /* Intel HWP_NOTIFY */
194#define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */
195#define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */
196#define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */
192 197
193/* Virtualization flags: Linux defined, word 8 */ 198/* Virtualization flags: Linux defined, word 8 */
194#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ 199#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index e21331ce368f..62838e54947d 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -152,6 +152,45 @@
152#define MSR_CC6_DEMOTION_POLICY_CONFIG 0x00000668 152#define MSR_CC6_DEMOTION_POLICY_CONFIG 0x00000668
153#define MSR_MC6_DEMOTION_POLICY_CONFIG 0x00000669 153#define MSR_MC6_DEMOTION_POLICY_CONFIG 0x00000669
154 154
155/* Hardware P state interface */
156#define MSR_PPERF 0x0000064e
157#define MSR_PERF_LIMIT_REASONS 0x0000064f
158#define MSR_PM_ENABLE 0x00000770
159#define MSR_HWP_CAPABILITIES 0x00000771
160#define MSR_HWP_REQUEST_PKG 0x00000772
161#define MSR_HWP_INTERRUPT 0x00000773
162#define MSR_HWP_REQUEST 0x00000774
163#define MSR_HWP_STATUS 0x00000777
164
165/* CPUID.6.EAX */
166#define HWP_BASE_BIT (1<<7)
167#define HWP_NOTIFICATIONS_BIT (1<<8)
168#define HWP_ACTIVITY_WINDOW_BIT (1<<9)
169#define HWP_ENERGY_PERF_PREFERENCE_BIT (1<<10)
170#define HWP_PACKAGE_LEVEL_REQUEST_BIT (1<<11)
171
172/* IA32_HWP_CAPABILITIES */
173#define HWP_HIGHEST_PERF(x) (x & 0xff)
174#define HWP_GUARANTEED_PERF(x) ((x & (0xff << 8)) >>8)
175#define HWP_MOSTEFFICIENT_PERF(x) ((x & (0xff << 16)) >>16)
176#define HWP_LOWEST_PERF(x) ((x & (0xff << 24)) >>24)
177
178/* IA32_HWP_REQUEST */
179#define HWP_MIN_PERF(x) (x & 0xff)
180#define HWP_MAX_PERF(x) ((x & 0xff) << 8)
181#define HWP_DESIRED_PERF(x) ((x & 0xff) << 16)
182#define HWP_ENERGY_PERF_PREFERENCE(x) ((x & 0xff) << 24)
183#define HWP_ACTIVITY_WINDOW(x) ((x & 0xff3) << 32)
184#define HWP_PACKAGE_CONTROL(x) ((x & 0x1) << 42)
185
186/* IA32_HWP_STATUS */
187#define HWP_GUARANTEED_CHANGE(x) (x & 0x1)
188#define HWP_EXCURSION_TO_MINIMUM(x) (x & 0x4)
189
190/* IA32_HWP_INTERRUPT */
191#define HWP_CHANGE_TO_GUARANTEED_INT(x) (x & 0x1)
192#define HWP_EXCURSION_TO_MINIMUM_INT(x) (x & 0x2)
193
155#define MSR_AMD64_MC0_MASK 0xc0010044 194#define MSR_AMD64_MC0_MASK 0xc0010044
156 195
157#define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x)) 196#define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x))
@@ -345,6 +384,8 @@
345 384
346#define MSR_IA32_TEMPERATURE_TARGET 0x000001a2 385#define MSR_IA32_TEMPERATURE_TARGET 0x000001a2
347 386
387#define MSR_MISC_PWR_MGMT 0x000001aa
388
348#define MSR_IA32_ENERGY_PERF_BIAS 0x000001b0 389#define MSR_IA32_ENERGY_PERF_BIAS 0x000001b0
349#define ENERGY_PERF_BIAS_PERFORMANCE 0 390#define ENERGY_PERF_BIAS_PERFORMANCE 0
350#define ENERGY_PERF_BIAS_NORMAL 6 391#define ENERGY_PERF_BIAS_NORMAL 6
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 4a8013d55947..60639093d536 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -36,6 +36,11 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
36 { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 }, 36 { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 },
37 { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 }, 37 { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 },
38 { X86_FEATURE_PTS, CR_EAX, 6, 0x00000006, 0 }, 38 { X86_FEATURE_PTS, CR_EAX, 6, 0x00000006, 0 },
39 { X86_FEATURE_HWP, CR_EAX, 7, 0x00000006, 0 },
40 { X86_FEATURE_HWP_NOITFY, CR_EAX, 8, 0x00000006, 0 },
41 { X86_FEATURE_HWP_ACT_WINDOW, CR_EAX, 9, 0x00000006, 0 },
42 { X86_FEATURE_HWP_EPP, CR_EAX,10, 0x00000006, 0 },
43 { X86_FEATURE_HWP_PKG_REQ, CR_EAX,11, 0x00000006, 0 },
39 { X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006, 0 }, 44 { X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006, 0 },
40 { X86_FEATURE_EPB, CR_ECX, 3, 0x00000006, 0 }, 45 { X86_FEATURE_EPB, CR_ECX, 3, 0x00000006, 0 },
41 { X86_FEATURE_HW_PSTATE, CR_EDX, 7, 0x80000007, 0 }, 46 { X86_FEATURE_HW_PSTATE, CR_EDX, 7, 0x80000007, 0 },
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ac1c4de3a484..978f402006ee 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -630,7 +630,7 @@ static int mmu_spte_clear_track_bits(u64 *sptep)
630 * kvm mmu, before reclaiming the page, we should 630 * kvm mmu, before reclaiming the page, we should
631 * unmap it from mmu first. 631 * unmap it from mmu first.
632 */ 632 */
633 WARN_ON(!kvm_is_mmio_pfn(pfn) && !page_count(pfn_to_page(pfn))); 633 WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));
634 634
635 if (!shadow_accessed_mask || old_spte & shadow_accessed_mask) 635 if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
636 kvm_set_pfn_accessed(pfn); 636 kvm_set_pfn_accessed(pfn);
@@ -2461,7 +2461,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2461 spte |= PT_PAGE_SIZE_MASK; 2461 spte |= PT_PAGE_SIZE_MASK;
2462 if (tdp_enabled) 2462 if (tdp_enabled)
2463 spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn, 2463 spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
2464 kvm_is_mmio_pfn(pfn)); 2464 kvm_is_reserved_pfn(pfn));
2465 2465
2466 if (host_writable) 2466 if (host_writable)
2467 spte |= SPTE_HOST_WRITEABLE; 2467 spte |= SPTE_HOST_WRITEABLE;
@@ -2737,7 +2737,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
2737 * PT_PAGE_TABLE_LEVEL and there would be no adjustment done 2737 * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
2738 * here. 2738 * here.
2739 */ 2739 */
2740 if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn) && 2740 if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
2741 level == PT_PAGE_TABLE_LEVEL && 2741 level == PT_PAGE_TABLE_LEVEL &&
2742 PageTransCompound(pfn_to_page(pfn)) && 2742 PageTransCompound(pfn_to_page(pfn)) &&
2743 !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) { 2743 !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) {
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 7652e8dc188f..21b0bc6a9c96 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -1225,11 +1225,13 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
1225 card->config_regs = pci_iomap(dev, 0, CONFIG_RAM_SIZE); 1225 card->config_regs = pci_iomap(dev, 0, CONFIG_RAM_SIZE);
1226 if (!card->config_regs) { 1226 if (!card->config_regs) {
1227 dev_warn(&dev->dev, "Failed to ioremap config registers\n"); 1227 dev_warn(&dev->dev, "Failed to ioremap config registers\n");
1228 err = -ENOMEM;
1228 goto out_release_regions; 1229 goto out_release_regions;
1229 } 1230 }
1230 card->buffers = pci_iomap(dev, 1, DATA_RAM_SIZE); 1231 card->buffers = pci_iomap(dev, 1, DATA_RAM_SIZE);
1231 if (!card->buffers) { 1232 if (!card->buffers) {
1232 dev_warn(&dev->dev, "Failed to ioremap data buffers\n"); 1233 dev_warn(&dev->dev, "Failed to ioremap data buffers\n");
1234 err = -ENOMEM;
1233 goto out_unmap_config; 1235 goto out_unmap_config;
1234 } 1236 }
1235 1237
diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c
index 24b5b020753a..a23ac0c724f0 100644
--- a/drivers/clk/at91/clk-usb.c
+++ b/drivers/clk/at91/clk-usb.c
@@ -52,29 +52,26 @@ static unsigned long at91sam9x5_clk_usb_recalc_rate(struct clk_hw *hw,
52 52
53 tmp = pmc_read(pmc, AT91_PMC_USB); 53 tmp = pmc_read(pmc, AT91_PMC_USB);
54 usbdiv = (tmp & AT91_PMC_OHCIUSBDIV) >> SAM9X5_USB_DIV_SHIFT; 54 usbdiv = (tmp & AT91_PMC_OHCIUSBDIV) >> SAM9X5_USB_DIV_SHIFT;
55 return parent_rate / (usbdiv + 1); 55
56 return DIV_ROUND_CLOSEST(parent_rate, (usbdiv + 1));
56} 57}
57 58
58static long at91sam9x5_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate, 59static long at91sam9x5_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
59 unsigned long *parent_rate) 60 unsigned long *parent_rate)
60{ 61{
61 unsigned long div; 62 unsigned long div;
62 unsigned long bestrate; 63
63 unsigned long tmp; 64 if (!rate)
65 return -EINVAL;
64 66
65 if (rate >= *parent_rate) 67 if (rate >= *parent_rate)
66 return *parent_rate; 68 return *parent_rate;
67 69
68 div = *parent_rate / rate; 70 div = DIV_ROUND_CLOSEST(*parent_rate, rate);
69 if (div >= SAM9X5_USB_MAX_DIV) 71 if (div > SAM9X5_USB_MAX_DIV + 1)
70 return *parent_rate / (SAM9X5_USB_MAX_DIV + 1); 72 div = SAM9X5_USB_MAX_DIV + 1;
71
72 bestrate = *parent_rate / div;
73 tmp = *parent_rate / (div + 1);
74 if (bestrate - rate > rate - tmp)
75 bestrate = tmp;
76 73
77 return bestrate; 74 return DIV_ROUND_CLOSEST(*parent_rate, div);
78} 75}
79 76
80static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index) 77static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index)
@@ -106,9 +103,13 @@ static int at91sam9x5_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
106 u32 tmp; 103 u32 tmp;
107 struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw); 104 struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
108 struct at91_pmc *pmc = usb->pmc; 105 struct at91_pmc *pmc = usb->pmc;
109 unsigned long div = parent_rate / rate; 106 unsigned long div;
107
108 if (!rate)
109 return -EINVAL;
110 110
111 if (parent_rate % rate || div < 1 || div >= SAM9X5_USB_MAX_DIV) 111 div = DIV_ROUND_CLOSEST(parent_rate, rate);
112 if (div > SAM9X5_USB_MAX_DIV + 1 || !div)
112 return -EINVAL; 113 return -EINVAL;
113 114
114 tmp = pmc_read(pmc, AT91_PMC_USB) & ~AT91_PMC_OHCIUSBDIV; 115 tmp = pmc_read(pmc, AT91_PMC_USB) & ~AT91_PMC_OHCIUSBDIV;
@@ -253,7 +254,7 @@ static long at91rm9200_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
253 254
254 tmp_parent_rate = rate * usb->divisors[i]; 255 tmp_parent_rate = rate * usb->divisors[i];
255 tmp_parent_rate = __clk_round_rate(parent, tmp_parent_rate); 256 tmp_parent_rate = __clk_round_rate(parent, tmp_parent_rate);
256 tmprate = tmp_parent_rate / usb->divisors[i]; 257 tmprate = DIV_ROUND_CLOSEST(tmp_parent_rate, usb->divisors[i]);
257 if (tmprate < rate) 258 if (tmprate < rate)
258 tmpdiff = rate - tmprate; 259 tmpdiff = rate - tmprate;
259 else 260 else
@@ -281,10 +282,10 @@ static int at91rm9200_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
281 struct at91_pmc *pmc = usb->pmc; 282 struct at91_pmc *pmc = usb->pmc;
282 unsigned long div; 283 unsigned long div;
283 284
284 if (!rate || parent_rate % rate) 285 if (!rate)
285 return -EINVAL; 286 return -EINVAL;
286 287
287 div = parent_rate / rate; 288 div = DIV_ROUND_CLOSEST(parent_rate, rate);
288 289
289 for (i = 0; i < RM9200_USB_DIV_TAB_SIZE; i++) { 290 for (i = 0; i < RM9200_USB_DIV_TAB_SIZE; i++) {
290 if (usb->divisors[i] == div) { 291 if (usb->divisors[i] == div) {
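The change from plain '/' to DIV_ROUND_CLOSEST() in the hunks above only matters when the requested rate does not divide the parent rate exactly, but in that case it can pick a different divider. A standalone sketch with made-up rates (480 MHz parent, 130 MHz target) and a local copy of the unsigned form of the kernel helper:

#include <stdio.h>

/* Unsigned form of the kernel's DIV_ROUND_CLOSEST() helper, copied locally. */
#define DIV_ROUND_CLOSEST(x, d) (((x) + ((d) / 2)) / (d))

int main(void)
{
    unsigned long parent_rate = 480000000UL; /* 480 MHz, illustrative */
    unsigned long rate = 130000000UL;        /* 130 MHz target, illustrative */

    unsigned long div_trunc = parent_rate / rate;                   /* 3 */
    unsigned long div_round = DIV_ROUND_CLOSEST(parent_rate, rate); /* 4 */

    printf("truncating: div=%lu -> %lu Hz\n", div_trunc, parent_rate / div_trunc);
    printf("closest:    div=%lu -> %lu Hz\n", div_round,
           DIV_ROUND_CLOSEST(parent_rate, div_round));
    return 0;
}

Truncation picks div = 3 and overshoots to 160 MHz; rounding to the closest divider picks div = 4 and lands at 120 MHz, which is nearer to the requested 130 MHz.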
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 18a9de29df0e..c0a842b335c5 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -263,6 +263,14 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
263 if (!rate) 263 if (!rate)
264 rate = 1; 264 rate = 1;
265 265
266 /* if read only, just return current value */
267 if (divider->flags & CLK_DIVIDER_READ_ONLY) {
268 bestdiv = readl(divider->reg) >> divider->shift;
269 bestdiv &= div_mask(divider);
270 bestdiv = _get_div(divider, bestdiv);
271 return bestdiv;
272 }
273
266 maxdiv = _get_maxdiv(divider); 274 maxdiv = _get_maxdiv(divider);
267 275
268 if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) { 276 if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) {
@@ -361,11 +369,6 @@ const struct clk_ops clk_divider_ops = {
361}; 369};
362EXPORT_SYMBOL_GPL(clk_divider_ops); 370EXPORT_SYMBOL_GPL(clk_divider_ops);
363 371
364const struct clk_ops clk_divider_ro_ops = {
365 .recalc_rate = clk_divider_recalc_rate,
366};
367EXPORT_SYMBOL_GPL(clk_divider_ro_ops);
368
369static struct clk *_register_divider(struct device *dev, const char *name, 372static struct clk *_register_divider(struct device *dev, const char *name,
370 const char *parent_name, unsigned long flags, 373 const char *parent_name, unsigned long flags,
371 void __iomem *reg, u8 shift, u8 width, 374 void __iomem *reg, u8 shift, u8 width,
@@ -391,10 +394,7 @@ static struct clk *_register_divider(struct device *dev, const char *name,
391	} 394	}
392 395
393 init.name = name; 396 init.name = name;
394 if (clk_divider_flags & CLK_DIVIDER_READ_ONLY) 397 init.ops = &clk_divider_ops;
395 init.ops = &clk_divider_ro_ops;
396 else
397 init.ops = &clk_divider_ops;
398 init.flags = flags | CLK_IS_BASIC; 398 init.flags = flags | CLK_IS_BASIC;
399 init.parent_names = (parent_name ? &parent_name: NULL); 399 init.parent_names = (parent_name ? &parent_name: NULL);
400 init.num_parents = (parent_name ? 1 : 0); 400 init.num_parents = (parent_name ? 1 : 0);
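With clk_divider_ro_ops removed, a read-only divider now goes through the normal ops, and clk_divider_bestdiv() simply reports the divider that is already programmed, via the shift-and-mask read added above. A userspace sketch of that extraction, with an assumed 4-bit field at bit 8 and a fake register value (it skips the _get_div() adjustment the kernel applies afterwards):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Assumed divider field: 4 bits wide, starting at bit 8. */
    const unsigned int shift = 8;
    const uint32_t mask = (1U << 4) - 1;      /* 0xf */
    uint32_t reg = 0x00000500;                /* fake register readout */

    /* Same pattern as the CLK_DIVIDER_READ_ONLY branch added above. */
    uint32_t bestdiv = (reg >> shift) & mask; /* 0x5 */

    printf("read-only divider currently set to %u\n", bestdiv);
    return 0;
}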
diff --git a/drivers/clk/pxa/clk-pxa27x.c b/drivers/clk/pxa/clk-pxa27x.c
index b345cc791e5d..88b9fe13fa44 100644
--- a/drivers/clk/pxa/clk-pxa27x.c
+++ b/drivers/clk/pxa/clk-pxa27x.c
@@ -322,7 +322,7 @@ static unsigned long clk_pxa27x_memory_get_rate(struct clk_hw *hw,
322 unsigned long ccsr = CCSR; 322 unsigned long ccsr = CCSR;
323 323
324 osc_forced = ccsr & (1 << CCCR_CPDIS_BIT); 324 osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
325 a = cccr & CCCR_A_BIT; 325 a = cccr & (1 << CCCR_A_BIT);
326 l = ccsr & CCSR_L_MASK; 326 l = ccsr & CCSR_L_MASK;
327 327
328 if (osc_forced || a) 328 if (osc_forced || a)
@@ -341,7 +341,7 @@ static u8 clk_pxa27x_memory_get_parent(struct clk_hw *hw)
341 unsigned long ccsr = CCSR; 341 unsigned long ccsr = CCSR;
342 342
343 osc_forced = ccsr & (1 << CCCR_CPDIS_BIT); 343 osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
344 a = cccr & CCCR_A_BIT; 344 a = cccr & (1 << CCCR_A_BIT);
345 if (osc_forced) 345 if (osc_forced)
346 return PXA_MEM_13Mhz; 346 return PXA_MEM_13Mhz;
347 if (a) 347 if (a)
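The pxa27x fix above is the usual bit-position versus bit-mask mix-up: CCCR_A_BIT names a bit position, so it has to be shifted into a mask before it can be tested. A minimal sketch (the position 25 is only an assumed value for illustration):

#include <stdio.h>

#define CCCR_A_BIT 25  /* bit position; value assumed for illustration */

int main(void)
{
    unsigned long cccr = 1UL << CCCR_A_BIT; /* pretend the 'A' flag is set */

    /* Old (buggy) test: uses the position as if it were a mask, so it
     * really checks bits 0, 3 and 4 (25 == 0b11001). */
    int a_wrong = !!(cccr & CCCR_A_BIT);

    /* Fixed test from the patch: build the mask from the position. */
    int a_right = !!(cccr & (1 << CCCR_A_BIT));

    printf("buggy test: %d, fixed test: %d\n", a_wrong, a_right);
    return 0;
}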
diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c
index dab988ab8cf1..157139a5c1ca 100644
--- a/drivers/clk/qcom/mmcc-apq8084.c
+++ b/drivers/clk/qcom/mmcc-apq8084.c
@@ -3122,7 +3122,7 @@ static struct clk_regmap *mmcc_apq8084_clocks[] = {
3122 [ESC1_CLK_SRC] = &esc1_clk_src.clkr, 3122 [ESC1_CLK_SRC] = &esc1_clk_src.clkr,
3123 [HDMI_CLK_SRC] = &hdmi_clk_src.clkr, 3123 [HDMI_CLK_SRC] = &hdmi_clk_src.clkr,
3124 [VSYNC_CLK_SRC] = &vsync_clk_src.clkr, 3124 [VSYNC_CLK_SRC] = &vsync_clk_src.clkr,
3125 [RBCPR_CLK_SRC] = &rbcpr_clk_src.clkr, 3125 [MMSS_RBCPR_CLK_SRC] = &rbcpr_clk_src.clkr,
3126 [RBBMTIMER_CLK_SRC] = &rbbmtimer_clk_src.clkr, 3126 [RBBMTIMER_CLK_SRC] = &rbbmtimer_clk_src.clkr,
3127 [MAPLE_CLK_SRC] = &maple_clk_src.clkr, 3127 [MAPLE_CLK_SRC] = &maple_clk_src.clkr,
3128 [VDP_CLK_SRC] = &vdp_clk_src.clkr, 3128 [VDP_CLK_SRC] = &vdp_clk_src.clkr,
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index 1e68bff481b8..880a266f0143 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -90,9 +90,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
90 div->width = div_width; 90 div->width = div_width;
91 div->lock = lock; 91 div->lock = lock;
92 div->table = div_table; 92 div->table = div_table;
93 div_ops = (div_flags & CLK_DIVIDER_READ_ONLY) 93 div_ops = &clk_divider_ops;
94 ? &clk_divider_ro_ops
95 : &clk_divider_ops;
96 } 94 }
97 95
98 clk = clk_register_composite(NULL, name, parent_names, num_parents, 96 clk = clk_register_composite(NULL, name, parent_names, num_parents,
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 3489f8f5fada..29b2ef5a68b9 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -63,7 +63,6 @@ config CPU_FREQ_DEFAULT_GOV_PERFORMANCE
63 63
64config CPU_FREQ_DEFAULT_GOV_POWERSAVE 64config CPU_FREQ_DEFAULT_GOV_POWERSAVE
65 bool "powersave" 65 bool "powersave"
66 depends on EXPERT
67 select CPU_FREQ_GOV_POWERSAVE 66 select CPU_FREQ_GOV_POWERSAVE
68 help 67 help
69 Use the CPUFreq governor 'powersave' as default. This sets 68 Use the CPUFreq governor 'powersave' as default. This sets
@@ -183,6 +182,8 @@ config CPU_FREQ_GOV_CONSERVATIVE
183 182
184 If in doubt, say N. 183 If in doubt, say N.
185 184
185comment "CPU frequency scaling drivers"
186
186config CPUFREQ_DT 187config CPUFREQ_DT
187 tristate "Generic DT based cpufreq driver" 188 tristate "Generic DT based cpufreq driver"
188 depends on HAVE_CLK && OF 189 depends on HAVE_CLK && OF
@@ -196,19 +197,19 @@ config CPUFREQ_DT
196 197
197 If in doubt, say N. 198 If in doubt, say N.
198 199
199menu "x86 CPU frequency scaling drivers" 200if X86
200depends on X86
201source "drivers/cpufreq/Kconfig.x86" 201source "drivers/cpufreq/Kconfig.x86"
202endmenu 202endif
203 203
204menu "ARM CPU frequency scaling drivers" 204if ARM || ARM64
205depends on ARM || ARM64
206source "drivers/cpufreq/Kconfig.arm" 205source "drivers/cpufreq/Kconfig.arm"
207endmenu 206endif
208 207
209menu "AVR32 CPU frequency scaling drivers" 208if PPC32 || PPC64
210depends on AVR32 209source "drivers/cpufreq/Kconfig.powerpc"
210endif
211 211
212if AVR32
212config AVR32_AT32AP_CPUFREQ 213config AVR32_AT32AP_CPUFREQ
213 bool "CPU frequency driver for AT32AP" 214 bool "CPU frequency driver for AT32AP"
214 depends on PLATFORM_AT32AP 215 depends on PLATFORM_AT32AP
@@ -216,12 +217,9 @@ config AVR32_AT32AP_CPUFREQ
216 help 217 help
217 This enables the CPU frequency driver for AT32AP processors. 218 This enables the CPU frequency driver for AT32AP processors.
218 If in doubt, say N. 219 If in doubt, say N.
220endif
219 221
220endmenu 222if IA64
221
222menu "CPUFreq processor drivers"
223depends on IA64
224
225config IA64_ACPI_CPUFREQ 223config IA64_ACPI_CPUFREQ
226 tristate "ACPI Processor P-States driver" 224 tristate "ACPI Processor P-States driver"
227 depends on ACPI_PROCESSOR 225 depends on ACPI_PROCESSOR
@@ -232,12 +230,9 @@ config IA64_ACPI_CPUFREQ
232 For details, take a look at <file:Documentation/cpu-freq/>. 230 For details, take a look at <file:Documentation/cpu-freq/>.
233 231
234 If in doubt, say N. 232 If in doubt, say N.
233endif
235 234
236endmenu 235if MIPS
237
238menu "MIPS CPUFreq processor drivers"
239depends on MIPS
240
241config LOONGSON2_CPUFREQ 236config LOONGSON2_CPUFREQ
242 tristate "Loongson2 CPUFreq Driver" 237 tristate "Loongson2 CPUFreq Driver"
243 help 238 help
@@ -250,15 +245,18 @@ config LOONGSON2_CPUFREQ
250 245
251 If in doubt, say N. 246 If in doubt, say N.
252 247
253endmenu 248config LOONGSON1_CPUFREQ
249 tristate "Loongson1 CPUFreq Driver"
250 help
251 This option adds a CPUFreq driver for loongson1 processors which
252 support software configurable cpu frequency.
254 253
255menu "PowerPC CPU frequency scaling drivers" 254 For details, take a look at <file:Documentation/cpu-freq/>.
256depends on PPC32 || PPC64
257source "drivers/cpufreq/Kconfig.powerpc"
258endmenu
259 255
260menu "SPARC CPU frequency scaling drivers" 256 If in doubt, say N.
261depends on SPARC64 257endif
258
259if SPARC64
262config SPARC_US3_CPUFREQ 260config SPARC_US3_CPUFREQ
263 tristate "UltraSPARC-III CPU Frequency driver" 261 tristate "UltraSPARC-III CPU Frequency driver"
264 help 262 help
@@ -276,10 +274,9 @@ config SPARC_US2E_CPUFREQ
276 For details, take a look at <file:Documentation/cpu-freq>. 274 For details, take a look at <file:Documentation/cpu-freq>.
277 275
278 If in doubt, say N. 276 If in doubt, say N.
279endmenu 277endif
280 278
281menu "SH CPU Frequency scaling" 279if SUPERH
282depends on SUPERH
283config SH_CPU_FREQ 280config SH_CPU_FREQ
284 tristate "SuperH CPU Frequency driver" 281 tristate "SuperH CPU Frequency driver"
285 help 282 help
@@ -293,7 +290,7 @@ config SH_CPU_FREQ
293 For details, take a look at <file:Documentation/cpu-freq>. 290 For details, take a look at <file:Documentation/cpu-freq>.
294 291
295 If unsure, say N. 292 If unsure, say N.
296endmenu 293endif
297 294
298endif 295endif
299endmenu 296endmenu
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 83a75dc84761..0f9a2c3c0e0d 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -247,3 +247,11 @@ config ARM_TEGRA_CPUFREQ
247 default y 247 default y
248 help 248 help
249 This adds the CPUFreq driver support for TEGRA SOCs. 249 This adds the CPUFreq driver support for TEGRA SOCs.
250
251config ARM_PXA2xx_CPUFREQ
252 tristate "Intel PXA2xx CPUfreq driver"
253 depends on PXA27x || PXA25x
254 help
255 This add the CPUFreq driver support for Intel PXA2xx SOCs.
256
257 If in doubt, say N.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 40c53dc1937e..b3ca7b0b2c33 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -61,8 +61,7 @@ obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
61obj-$(CONFIG_ARM_INTEGRATOR) += integrator-cpufreq.o 61obj-$(CONFIG_ARM_INTEGRATOR) += integrator-cpufreq.o
62obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o 62obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o
63obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o 63obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
64obj-$(CONFIG_PXA25x) += pxa2xx-cpufreq.o 64obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o
65obj-$(CONFIG_PXA27x) += pxa2xx-cpufreq.o
66obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o 65obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o
67obj-$(CONFIG_ARM_S3C24XX_CPUFREQ) += s3c24xx-cpufreq.o 66obj-$(CONFIG_ARM_S3C24XX_CPUFREQ) += s3c24xx-cpufreq.o
68obj-$(CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS) += s3c24xx-cpufreq-debugfs.o 67obj-$(CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS) += s3c24xx-cpufreq-debugfs.o
@@ -98,6 +97,7 @@ obj-$(CONFIG_CRIS_MACH_ARTPEC3) += cris-artpec3-cpufreq.o
98obj-$(CONFIG_ETRAXFS) += cris-etraxfs-cpufreq.o 97obj-$(CONFIG_ETRAXFS) += cris-etraxfs-cpufreq.o
99obj-$(CONFIG_IA64_ACPI_CPUFREQ) += ia64-acpi-cpufreq.o 98obj-$(CONFIG_IA64_ACPI_CPUFREQ) += ia64-acpi-cpufreq.o
100obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o 99obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o
100obj-$(CONFIG_LOONGSON1_CPUFREQ) += ls1x-cpufreq.o
101obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o 101obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o
102obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o 102obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o
103obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o 103obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index a46c223c2506..e1a6ba66a7f5 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -289,6 +289,8 @@ static void _put_cluster_clk_and_freq_table(struct device *cpu_dev)
289 289
290 clk_put(clk[cluster]); 290 clk_put(clk[cluster]);
291 dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]); 291 dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
292 if (arm_bL_ops->free_opp_table)
293 arm_bL_ops->free_opp_table(cpu_dev);
292 dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster); 294 dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
293} 295}
294 296
@@ -337,7 +339,7 @@ static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
337 if (ret) { 339 if (ret) {
338 dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n", 340 dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n",
339 __func__, cpu_dev->id, ret); 341 __func__, cpu_dev->id, ret);
340 goto out; 342 goto free_opp_table;
341 } 343 }
342 344
343 name[12] = cluster + '0'; 345 name[12] = cluster + '0';
@@ -354,6 +356,9 @@ static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
354 ret = PTR_ERR(clk[cluster]); 356 ret = PTR_ERR(clk[cluster]);
355 dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]); 357 dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
356 358
359free_opp_table:
360 if (arm_bL_ops->free_opp_table)
361 arm_bL_ops->free_opp_table(cpu_dev);
357out: 362out:
358 dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__, 363 dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
359 cluster); 364 cluster);
diff --git a/drivers/cpufreq/arm_big_little.h b/drivers/cpufreq/arm_big_little.h
index 70f18fc12d4a..a211f7db9d32 100644
--- a/drivers/cpufreq/arm_big_little.h
+++ b/drivers/cpufreq/arm_big_little.h
@@ -25,13 +25,16 @@
25 25
26struct cpufreq_arm_bL_ops { 26struct cpufreq_arm_bL_ops {
27 char name[CPUFREQ_NAME_LEN]; 27 char name[CPUFREQ_NAME_LEN];
28 int (*get_transition_latency)(struct device *cpu_dev);
29 28
30 /* 29 /*
31 * This must set opp table for cpu_dev in a similar way as done by 30 * This must set opp table for cpu_dev in a similar way as done by
32 * of_init_opp_table(). 31 * of_init_opp_table().
33 */ 32 */
34 int (*init_opp_table)(struct device *cpu_dev); 33 int (*init_opp_table)(struct device *cpu_dev);
34
35 /* Optional */
36 int (*get_transition_latency)(struct device *cpu_dev);
37 void (*free_opp_table)(struct device *cpu_dev);
35}; 38};
36 39
37int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops); 40int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops);
diff --git a/drivers/cpufreq/arm_big_little_dt.c b/drivers/cpufreq/arm_big_little_dt.c
index 4550f6976768..ef0b3f1324d5 100644
--- a/drivers/cpufreq/arm_big_little_dt.c
+++ b/drivers/cpufreq/arm_big_little_dt.c
@@ -82,6 +82,7 @@ static struct cpufreq_arm_bL_ops dt_bL_ops = {
82 .name = "dt-bl", 82 .name = "dt-bl",
83 .get_transition_latency = dt_get_transition_latency, 83 .get_transition_latency = dt_get_transition_latency,
84 .init_opp_table = dt_init_opp_table, 84 .init_opp_table = dt_init_opp_table,
85 .free_opp_table = of_free_opp_table,
85}; 86};
86 87
87static int generic_bL_probe(struct platform_device *pdev) 88static int generic_bL_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index f657c571b18e..9bc2720628a4 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -58,6 +58,8 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
58 old_freq = clk_get_rate(cpu_clk) / 1000; 58 old_freq = clk_get_rate(cpu_clk) / 1000;
59 59
60 if (!IS_ERR(cpu_reg)) { 60 if (!IS_ERR(cpu_reg)) {
61 unsigned long opp_freq;
62
61 rcu_read_lock(); 63 rcu_read_lock();
62 opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz); 64 opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
63 if (IS_ERR(opp)) { 65 if (IS_ERR(opp)) {
@@ -67,13 +69,16 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
67 return PTR_ERR(opp); 69 return PTR_ERR(opp);
68 } 70 }
69 volt = dev_pm_opp_get_voltage(opp); 71 volt = dev_pm_opp_get_voltage(opp);
72 opp_freq = dev_pm_opp_get_freq(opp);
70 rcu_read_unlock(); 73 rcu_read_unlock();
71 tol = volt * priv->voltage_tolerance / 100; 74 tol = volt * priv->voltage_tolerance / 100;
72 volt_old = regulator_get_voltage(cpu_reg); 75 volt_old = regulator_get_voltage(cpu_reg);
76 dev_dbg(cpu_dev, "Found OPP: %ld kHz, %ld uV\n",
77 opp_freq / 1000, volt);
73 } 78 }
74 79
75 dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n", 80 dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",
76 old_freq / 1000, volt_old ? volt_old / 1000 : -1, 81 old_freq / 1000, (volt_old > 0) ? volt_old / 1000 : -1,
77 new_freq / 1000, volt ? volt / 1000 : -1); 82 new_freq / 1000, volt ? volt / 1000 : -1);
78 83
79 /* scaling up? scale voltage before frequency */ 84 /* scaling up? scale voltage before frequency */
@@ -89,7 +94,7 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
89 ret = clk_set_rate(cpu_clk, freq_exact); 94 ret = clk_set_rate(cpu_clk, freq_exact);
90 if (ret) { 95 if (ret) {
91 dev_err(cpu_dev, "failed to set clock rate: %d\n", ret); 96 dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
92 if (!IS_ERR(cpu_reg)) 97 if (!IS_ERR(cpu_reg) && volt_old > 0)
93 regulator_set_voltage_tol(cpu_reg, volt_old, tol); 98 regulator_set_voltage_tol(cpu_reg, volt_old, tol);
94 return ret; 99 return ret;
95 } 100 }
@@ -181,7 +186,6 @@ static int cpufreq_init(struct cpufreq_policy *policy)
181{ 186{
182 struct cpufreq_dt_platform_data *pd; 187 struct cpufreq_dt_platform_data *pd;
183 struct cpufreq_frequency_table *freq_table; 188 struct cpufreq_frequency_table *freq_table;
184 struct thermal_cooling_device *cdev;
185 struct device_node *np; 189 struct device_node *np;
186 struct private_data *priv; 190 struct private_data *priv;
187 struct device *cpu_dev; 191 struct device *cpu_dev;
@@ -210,7 +214,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
210 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 214 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
211 if (!priv) { 215 if (!priv) {
212 ret = -ENOMEM; 216 ret = -ENOMEM;
213 goto out_put_node; 217 goto out_free_opp;
214 } 218 }
215 219
216 of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance); 220 of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);
@@ -264,20 +268,6 @@ static int cpufreq_init(struct cpufreq_policy *policy)
264 goto out_free_priv; 268 goto out_free_priv;
265 } 269 }
266 270
267 /*
268 * For now, just loading the cooling device;
269 * thermal DT code takes care of matching them.
270 */
271 if (of_find_property(np, "#cooling-cells", NULL)) {
272 cdev = of_cpufreq_cooling_register(np, cpu_present_mask);
273 if (IS_ERR(cdev))
274 dev_err(cpu_dev,
275 "running cpufreq without cooling device: %ld\n",
276 PTR_ERR(cdev));
277 else
278 priv->cdev = cdev;
279 }
280
281 priv->cpu_dev = cpu_dev; 271 priv->cpu_dev = cpu_dev;
282 priv->cpu_reg = cpu_reg; 272 priv->cpu_reg = cpu_reg;
283 policy->driver_data = priv; 273 policy->driver_data = priv;
@@ -287,7 +277,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
287 if (ret) { 277 if (ret) {
288 dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__, 278 dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__,
289 ret); 279 ret);
290 goto out_cooling_unregister; 280 goto out_free_cpufreq_table;
291 } 281 }
292 282
293 policy->cpuinfo.transition_latency = transition_latency; 283 policy->cpuinfo.transition_latency = transition_latency;
@@ -300,12 +290,12 @@ static int cpufreq_init(struct cpufreq_policy *policy)
300 290
301 return 0; 291 return 0;
302 292
303out_cooling_unregister: 293out_free_cpufreq_table:
304 cpufreq_cooling_unregister(priv->cdev);
305 dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); 294 dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
306out_free_priv: 295out_free_priv:
307 kfree(priv); 296 kfree(priv);
308out_put_node: 297out_free_opp:
298 of_free_opp_table(cpu_dev);
309 of_node_put(np); 299 of_node_put(np);
310out_put_reg_clk: 300out_put_reg_clk:
311 clk_put(cpu_clk); 301 clk_put(cpu_clk);
@@ -319,8 +309,10 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
319{ 309{
320 struct private_data *priv = policy->driver_data; 310 struct private_data *priv = policy->driver_data;
321 311
322 cpufreq_cooling_unregister(priv->cdev); 312 if (priv->cdev)
313 cpufreq_cooling_unregister(priv->cdev);
323 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); 314 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
315 of_free_opp_table(priv->cpu_dev);
324 clk_put(policy->clk); 316 clk_put(policy->clk);
325 if (!IS_ERR(priv->cpu_reg)) 317 if (!IS_ERR(priv->cpu_reg))
326 regulator_put(priv->cpu_reg); 318 regulator_put(priv->cpu_reg);
@@ -329,6 +321,33 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
329 return 0; 321 return 0;
330} 322}
331 323
324static void cpufreq_ready(struct cpufreq_policy *policy)
325{
326 struct private_data *priv = policy->driver_data;
327 struct device_node *np = of_node_get(priv->cpu_dev->of_node);
328
329 if (WARN_ON(!np))
330 return;
331
332 /*
333 * For now, just loading the cooling device;
334 * thermal DT code takes care of matching them.
335 */
336 if (of_find_property(np, "#cooling-cells", NULL)) {
337 priv->cdev = of_cpufreq_cooling_register(np,
338 policy->related_cpus);
339 if (IS_ERR(priv->cdev)) {
340 dev_err(priv->cpu_dev,
341 "running cpufreq without cooling device: %ld\n",
342 PTR_ERR(priv->cdev));
343
344 priv->cdev = NULL;
345 }
346 }
347
348 of_node_put(np);
349}
350
332static struct cpufreq_driver dt_cpufreq_driver = { 351static struct cpufreq_driver dt_cpufreq_driver = {
333 .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, 352 .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
334 .verify = cpufreq_generic_frequency_table_verify, 353 .verify = cpufreq_generic_frequency_table_verify,
@@ -336,6 +355,7 @@ static struct cpufreq_driver dt_cpufreq_driver = {
336 .get = cpufreq_generic_get, 355 .get = cpufreq_generic_get,
337 .init = cpufreq_init, 356 .init = cpufreq_init,
338 .exit = cpufreq_exit, 357 .exit = cpufreq_exit,
358 .ready = cpufreq_ready,
339 .name = "cpufreq-dt", 359 .name = "cpufreq-dt",
340 .attr = cpufreq_generic_attr, 360 .attr = cpufreq_generic_attr,
341}; 361};
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 4473eba1d6b0..a09a29c312a9 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -535,7 +535,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
535static ssize_t store_##file_name \ 535static ssize_t store_##file_name \
536(struct cpufreq_policy *policy, const char *buf, size_t count) \ 536(struct cpufreq_policy *policy, const char *buf, size_t count) \
537{ \ 537{ \
538 int ret; \ 538 int ret, temp; \
539 struct cpufreq_policy new_policy; \ 539 struct cpufreq_policy new_policy; \
540 \ 540 \
541 ret = cpufreq_get_policy(&new_policy, policy->cpu); \ 541 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
@@ -546,8 +546,10 @@ static ssize_t store_##file_name \
546 if (ret != 1) \ 546 if (ret != 1) \
547 return -EINVAL; \ 547 return -EINVAL; \
548 \ 548 \
549 temp = new_policy.object; \
549 ret = cpufreq_set_policy(policy, &new_policy); \ 550 ret = cpufreq_set_policy(policy, &new_policy); \
550 policy->user_policy.object = policy->object; \ 551 if (!ret) \
552 policy->user_policy.object = temp; \
551 \ 553 \
552 return ret ? ret : count; \ 554 return ret ? ret : count; \
553} 555}
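The store_##file_name change above preserves the value the user actually wrote: previously the macro copied the (possibly clamped) policy->object into user_policy even when cpufreq_set_policy() failed, whereas now it remembers the requested value and commits it only on success. A toy sketch of the idea, not the kernel code, where set_policy() is a stand-in that clamps requests below an assumed floor of 800:

#include <stdio.h>

struct policy { int min; int user_min; };

/* Stand-in for cpufreq_set_policy(): clamps to an assumed floor of 800. */
static int set_policy(struct policy *p, int requested_min)
{
    p->min = requested_min < 800 ? 800 : requested_min;
    return 0;
}

static int store_min(struct policy *p, int requested_min)
{
    int temp = requested_min;
    int ret = set_policy(p, requested_min);

    if (!ret)
        p->user_min = temp; /* remember what the user asked for, not the clamp */
    return ret;
}

int main(void)
{
    struct policy p = { .min = 1000, .user_min = 1000 };

    store_min(&p, 600);
    /* Prints: effective min=800, user requested min=600 */
    printf("effective min=%d, user requested min=%d\n", p.min, p.user_min);
    return 0;
}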
@@ -898,46 +900,31 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
898 struct freq_attr **drv_attr; 900 struct freq_attr **drv_attr;
899 int ret = 0; 901 int ret = 0;
900 902
901 /* prepare interface data */
902 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
903 &dev->kobj, "cpufreq");
904 if (ret)
905 return ret;
906
907 /* set up files for this cpu device */ 903 /* set up files for this cpu device */
908 drv_attr = cpufreq_driver->attr; 904 drv_attr = cpufreq_driver->attr;
909 while ((drv_attr) && (*drv_attr)) { 905 while ((drv_attr) && (*drv_attr)) {
910 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr)); 906 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
911 if (ret) 907 if (ret)
912 goto err_out_kobj_put; 908 return ret;
913 drv_attr++; 909 drv_attr++;
914 } 910 }
915 if (cpufreq_driver->get) { 911 if (cpufreq_driver->get) {
916 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr); 912 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
917 if (ret) 913 if (ret)
918 goto err_out_kobj_put; 914 return ret;
919 } 915 }
920 916
921 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); 917 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
922 if (ret) 918 if (ret)
923 goto err_out_kobj_put; 919 return ret;
924 920
925 if (cpufreq_driver->bios_limit) { 921 if (cpufreq_driver->bios_limit) {
926 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr); 922 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
927 if (ret) 923 if (ret)
928 goto err_out_kobj_put; 924 return ret;
929 } 925 }
930 926
931 ret = cpufreq_add_dev_symlink(policy); 927 return cpufreq_add_dev_symlink(policy);
932 if (ret)
933 goto err_out_kobj_put;
934
935 return ret;
936
937err_out_kobj_put:
938 kobject_put(&policy->kobj);
939 wait_for_completion(&policy->kobj_unregister);
940 return ret;
941} 928}
942 929
943static void cpufreq_init_policy(struct cpufreq_policy *policy) 930static void cpufreq_init_policy(struct cpufreq_policy *policy)
@@ -1196,6 +1183,8 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1196 goto err_set_policy_cpu; 1183 goto err_set_policy_cpu;
1197 } 1184 }
1198 1185
1186 down_write(&policy->rwsem);
1187
1199 /* related cpus should atleast have policy->cpus */ 1188 /* related cpus should atleast have policy->cpus */
1200 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus); 1189 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1201 1190
@@ -1208,9 +1197,17 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1208 if (!recover_policy) { 1197 if (!recover_policy) {
1209 policy->user_policy.min = policy->min; 1198 policy->user_policy.min = policy->min;
1210 policy->user_policy.max = policy->max; 1199 policy->user_policy.max = policy->max;
1200
1201 /* prepare interface data */
1202 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
1203 &dev->kobj, "cpufreq");
1204 if (ret) {
1205 pr_err("%s: failed to init policy->kobj: %d\n",
1206 __func__, ret);
1207 goto err_init_policy_kobj;
1208 }
1211 } 1209 }
1212 1210
1213 down_write(&policy->rwsem);
1214 write_lock_irqsave(&cpufreq_driver_lock, flags); 1211 write_lock_irqsave(&cpufreq_driver_lock, flags);
1215 for_each_cpu(j, policy->cpus) 1212 for_each_cpu(j, policy->cpus)
1216 per_cpu(cpufreq_cpu_data, j) = policy; 1213 per_cpu(cpufreq_cpu_data, j) = policy;
@@ -1288,8 +1285,13 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1288 up_write(&policy->rwsem); 1285 up_write(&policy->rwsem);
1289 1286
1290 kobject_uevent(&policy->kobj, KOBJ_ADD); 1287 kobject_uevent(&policy->kobj, KOBJ_ADD);
1288
1291 up_read(&cpufreq_rwsem); 1289 up_read(&cpufreq_rwsem);
1292 1290
1291 /* Callback for handling stuff after policy is ready */
1292 if (cpufreq_driver->ready)
1293 cpufreq_driver->ready(policy);
1294
1293 pr_debug("initialization complete\n"); 1295 pr_debug("initialization complete\n");
1294 1296
1295 return 0; 1297 return 0;
@@ -1301,6 +1303,11 @@ err_get_freq:
1301 per_cpu(cpufreq_cpu_data, j) = NULL; 1303 per_cpu(cpufreq_cpu_data, j) = NULL;
1302 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1304 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1303 1305
1306 if (!recover_policy) {
1307 kobject_put(&policy->kobj);
1308 wait_for_completion(&policy->kobj_unregister);
1309 }
1310err_init_policy_kobj:
1304 up_write(&policy->rwsem); 1311 up_write(&policy->rwsem);
1305 1312
1306 if (cpufreq_driver->exit) 1313 if (cpufreq_driver->exit)
diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c
index f33f25b483ca..27a57ed9eb2c 100644
--- a/drivers/cpufreq/exynos5440-cpufreq.c
+++ b/drivers/cpufreq/exynos5440-cpufreq.c
@@ -371,7 +371,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
371 if (ret) { 371 if (ret) {
372 dev_err(dvfs_info->dev, 372 dev_err(dvfs_info->dev,
373 "failed to init cpufreq table: %d\n", ret); 373 "failed to init cpufreq table: %d\n", ret);
374 goto err_put_node; 374 goto err_free_opp;
375 } 375 }
376 dvfs_info->freq_count = dev_pm_opp_get_opp_count(dvfs_info->dev); 376 dvfs_info->freq_count = dev_pm_opp_get_opp_count(dvfs_info->dev);
377 exynos_sort_descend_freq_table(); 377 exynos_sort_descend_freq_table();
@@ -423,6 +423,8 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
423 423
424err_free_table: 424err_free_table:
425 dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table); 425 dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
426err_free_opp:
427 of_free_opp_table(dvfs_info->dev);
426err_put_node: 428err_put_node:
427 of_node_put(np); 429 of_node_put(np);
428 dev_err(&pdev->dev, "%s: failed initialization\n", __func__); 430 dev_err(&pdev->dev, "%s: failed initialization\n", __func__);
@@ -433,6 +435,7 @@ static int exynos_cpufreq_remove(struct platform_device *pdev)
433{ 435{
434 cpufreq_unregister_driver(&exynos_driver); 436 cpufreq_unregister_driver(&exynos_driver);
435 dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table); 437 dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
438 of_free_opp_table(dvfs_info->dev);
436 return 0; 439 return 0;
437} 440}
438 441
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index c2d30765bf3d..5da1d131f770 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -31,6 +31,7 @@ static struct clk *step_clk;
31static struct clk *pll2_pfd2_396m_clk; 31static struct clk *pll2_pfd2_396m_clk;
32 32
33static struct device *cpu_dev; 33static struct device *cpu_dev;
34static bool free_opp;
34static struct cpufreq_frequency_table *freq_table; 35static struct cpufreq_frequency_table *freq_table;
35static unsigned int transition_latency; 36static unsigned int transition_latency;
36 37
@@ -207,11 +208,14 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
207 goto put_reg; 208 goto put_reg;
208 } 209 }
209 210
211 /* Because we have added the OPPs here, we must free them */
212 free_opp = true;
213
210 num = dev_pm_opp_get_opp_count(cpu_dev); 214 num = dev_pm_opp_get_opp_count(cpu_dev);
211 if (num < 0) { 215 if (num < 0) {
212 ret = num; 216 ret = num;
213 dev_err(cpu_dev, "no OPP table is found: %d\n", ret); 217 dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
214 goto put_reg; 218 goto out_free_opp;
215 } 219 }
216 } 220 }
217 221
@@ -306,6 +310,9 @@ soc_opp_out:
306 310
307free_freq_table: 311free_freq_table:
308 dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); 312 dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
313out_free_opp:
314 if (free_opp)
315 of_free_opp_table(cpu_dev);
309put_reg: 316put_reg:
310 if (!IS_ERR(arm_reg)) 317 if (!IS_ERR(arm_reg))
311 regulator_put(arm_reg); 318 regulator_put(arm_reg);
@@ -332,6 +339,8 @@ static int imx6q_cpufreq_remove(struct platform_device *pdev)
332{ 339{
333 cpufreq_unregister_driver(&imx6q_cpufreq_driver); 340 cpufreq_unregister_driver(&imx6q_cpufreq_driver);
334 dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); 341 dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
342 if (free_opp)
343 of_free_opp_table(cpu_dev);
335 regulator_put(arm_reg); 344 regulator_put(arm_reg);
336 if (!IS_ERR(pu_reg)) 345 if (!IS_ERR(pu_reg))
337 regulator_put(pu_reg); 346 regulator_put(pu_reg);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 27bb6d3877ed..742eefba12c2 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -137,6 +137,7 @@ struct cpu_defaults {
137 137
138static struct pstate_adjust_policy pid_params; 138static struct pstate_adjust_policy pid_params;
139static struct pstate_funcs pstate_funcs; 139static struct pstate_funcs pstate_funcs;
140static int hwp_active;
140 141
141struct perf_limits { 142struct perf_limits {
142 int no_turbo; 143 int no_turbo;
@@ -198,7 +199,14 @@ static signed int pid_calc(struct _pid *pid, int32_t busy)
198 199
199 pid->integral += fp_error; 200 pid->integral += fp_error;
200 201
201 /* limit the integral term */ 202 /*
203 * We limit the integral here so that it will never
204 * get higher than 30. This prevents it from becoming
205 * too large an input over long periods of time and allows
206 * it to get factored out sooner.
207 *
208 * The value of 30 was chosen through experimentation.
209 */
202 integral_limit = int_tofp(30); 210 integral_limit = int_tofp(30);
203 if (pid->integral > integral_limit) 211 if (pid->integral > integral_limit)
204 pid->integral = integral_limit; 212 pid->integral = integral_limit;
@@ -244,6 +252,34 @@ static inline void update_turbo_state(void)
244 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate); 252 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
245} 253}
246 254
255#define PCT_TO_HWP(x) (x * 255 / 100)
256static void intel_pstate_hwp_set(void)
257{
258 int min, max, cpu;
259 u64 value, freq;
260
261 get_online_cpus();
262
263 for_each_online_cpu(cpu) {
264 rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
265 min = PCT_TO_HWP(limits.min_perf_pct);
266 value &= ~HWP_MIN_PERF(~0L);
267 value |= HWP_MIN_PERF(min);
268
269 max = PCT_TO_HWP(limits.max_perf_pct);
270 if (limits.no_turbo) {
271 rdmsrl( MSR_HWP_CAPABILITIES, freq);
272 max = HWP_GUARANTEED_PERF(freq);
273 }
274
275 value &= ~HWP_MAX_PERF(~0L);
276 value |= HWP_MAX_PERF(max);
277 wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
278 }
279
280 put_online_cpus();
281}
282
247/************************** debugfs begin ************************/ 283/************************** debugfs begin ************************/
248static int pid_param_set(void *data, u64 val) 284static int pid_param_set(void *data, u64 val)
249{ 285{
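PCT_TO_HWP() above rescales the driver's 0..100 percent limits onto the 0..255 range used by the HWP request fields. A quick standalone check of the mapping, with arbitrary example percentages:

#include <stdio.h>

/* Copied from the hunk above. */
#define PCT_TO_HWP(x) (x * 255 / 100)

int main(void)
{
    int pcts[] = { 0, 25, 40, 100 };

    for (int i = 0; i < 4; i++)
        printf("%3d%% -> HWP level %d\n", pcts[i], PCT_TO_HWP(pcts[i]));
    /* Prints 0, 63, 102, 255: integer math truncates, which is acceptable
     * here because the levels are only coarse limits. */
    return 0;
}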
@@ -279,6 +315,8 @@ static void __init intel_pstate_debug_expose_params(void)
279 struct dentry *debugfs_parent; 315 struct dentry *debugfs_parent;
280 int i = 0; 316 int i = 0;
281 317
318 if (hwp_active)
319 return;
282 debugfs_parent = debugfs_create_dir("pstate_snb", NULL); 320 debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
283 if (IS_ERR_OR_NULL(debugfs_parent)) 321 if (IS_ERR_OR_NULL(debugfs_parent))
284 return; 322 return;
@@ -329,8 +367,12 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
329 pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); 367 pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
330 return -EPERM; 368 return -EPERM;
331 } 369 }
370
332 limits.no_turbo = clamp_t(int, input, 0, 1); 371 limits.no_turbo = clamp_t(int, input, 0, 1);
333 372
373 if (hwp_active)
374 intel_pstate_hwp_set();
375
334 return count; 376 return count;
335} 377}
336 378
@@ -348,6 +390,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
348 limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct); 390 limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
349 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); 391 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
350 392
393 if (hwp_active)
394 intel_pstate_hwp_set();
351 return count; 395 return count;
352} 396}
353 397
@@ -363,6 +407,8 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
363 limits.min_perf_pct = clamp_t(int, input, 0 , 100); 407 limits.min_perf_pct = clamp_t(int, input, 0 , 100);
364 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); 408 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
365 409
410 if (hwp_active)
411 intel_pstate_hwp_set();
366 return count; 412 return count;
367} 413}
368 414
@@ -395,8 +441,16 @@ static void __init intel_pstate_sysfs_expose_params(void)
395 rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group); 441 rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
396 BUG_ON(rc); 442 BUG_ON(rc);
397} 443}
398
399/************************** sysfs end ************************/ 444/************************** sysfs end ************************/
445
446static void intel_pstate_hwp_enable(void)
447{
448 hwp_active++;
449 pr_info("intel_pstate HWP enabled\n");
450
451 wrmsrl( MSR_PM_ENABLE, 0x1);
452}
453
400static int byt_get_min_pstate(void) 454static int byt_get_min_pstate(void)
401{ 455{
402 u64 value; 456 u64 value;
@@ -569,6 +623,11 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
569 if (limits.no_turbo || limits.turbo_disabled) 623 if (limits.no_turbo || limits.turbo_disabled)
570 max_perf = cpu->pstate.max_pstate; 624 max_perf = cpu->pstate.max_pstate;
571 625
626 /*
627 * performance can be limited by user through sysfs, by cpufreq
628 * policy, or by cpu specific default values determined through
629 * experimentation.
630 */
572 max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf)); 631 max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
573 *max = clamp_t(int, max_perf_adj, 632 *max = clamp_t(int, max_perf_adj,
574 cpu->pstate.min_pstate, cpu->pstate.turbo_pstate); 633 cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
@@ -648,6 +707,14 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
648 cpu->prev_mperf = mperf; 707 cpu->prev_mperf = mperf;
649} 708}
650 709
710static inline void intel_hwp_set_sample_time(struct cpudata *cpu)
711{
712 int delay;
713
714 delay = msecs_to_jiffies(50);
715 mod_timer_pinned(&cpu->timer, jiffies + delay);
716}
717
651static inline void intel_pstate_set_sample_time(struct cpudata *cpu) 718static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
652{ 719{
653 int delay; 720 int delay;
@@ -662,11 +729,29 @@ static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
662 u32 duration_us; 729 u32 duration_us;
663 u32 sample_time; 730 u32 sample_time;
664 731
732 /*
733 * core_busy is the ratio of actual performance to max
734 * max_pstate is the max non turbo pstate available
735 * current_pstate was the pstate that was requested during
736 * the last sample period.
737 *
738 * We normalize core_busy, which was our actual percent
739 * performance to what we requested during the last sample
740 * period. The result will be a percentage of busy at a
741 * specified pstate.
742 */
665 core_busy = cpu->sample.core_pct_busy; 743 core_busy = cpu->sample.core_pct_busy;
666 max_pstate = int_tofp(cpu->pstate.max_pstate); 744 max_pstate = int_tofp(cpu->pstate.max_pstate);
667 current_pstate = int_tofp(cpu->pstate.current_pstate); 745 current_pstate = int_tofp(cpu->pstate.current_pstate);
668 core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate)); 746 core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
669 747
748 /*
749 * Since we have a deferred timer, it will not fire unless
750 * we are in C0. So, determine if the actual elapsed time
751 * is significantly greater (3x) than our sample interval. If it
752 * is, then we were idle for a long enough period of time
753 * to adjust our busyness.
754 */
670 sample_time = pid_params.sample_rate_ms * USEC_PER_MSEC; 755 sample_time = pid_params.sample_rate_ms * USEC_PER_MSEC;
671 duration_us = (u32) ktime_us_delta(cpu->sample.time, 756 duration_us = (u32) ktime_us_delta(cpu->sample.time,
672 cpu->last_sample_time); 757 cpu->last_sample_time);
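The normalization described in the first comment above is core_busy scaled by max_pstate / current_pstate: utilization measured while running below the maximum P-state is translated into percent busy at the P-state that was actually requested. Ignoring the fixed-point helpers, with made-up numbers:

#include <stdio.h>

int main(void)
{
    /* Illustrative sample: 60% of max performance delivered while the last
     * request was P-state 16 on a part whose max non-turbo P-state is 24. */
    double core_busy = 60.0;
    double max_pstate = 24.0;
    double current_pstate = 16.0;

    /* Same formula as core_busy = mul_fp(core_busy,
     * div_fp(max_pstate, current_pstate)) above, without int_tofp(). */
    double scaled = core_busy * (max_pstate / current_pstate);

    printf("scaled busy = %.1f%%\n", scaled); /* 90.0% of the requested P-state */
    return 0;
}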
@@ -694,6 +779,14 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
694 intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl); 779 intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl);
695} 780}
696 781
782static void intel_hwp_timer_func(unsigned long __data)
783{
784 struct cpudata *cpu = (struct cpudata *) __data;
785
786 intel_pstate_sample(cpu);
787 intel_hwp_set_sample_time(cpu);
788}
789
697static void intel_pstate_timer_func(unsigned long __data) 790static void intel_pstate_timer_func(unsigned long __data)
698{ 791{
699 struct cpudata *cpu = (struct cpudata *) __data; 792 struct cpudata *cpu = (struct cpudata *) __data;
@@ -730,6 +823,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
730 ICPU(0x3f, core_params), 823 ICPU(0x3f, core_params),
731 ICPU(0x45, core_params), 824 ICPU(0x45, core_params),
732 ICPU(0x46, core_params), 825 ICPU(0x46, core_params),
826 ICPU(0x47, core_params),
733 ICPU(0x4c, byt_params), 827 ICPU(0x4c, byt_params),
734 ICPU(0x4f, core_params), 828 ICPU(0x4f, core_params),
735 ICPU(0x56, core_params), 829 ICPU(0x56, core_params),
@@ -737,6 +831,11 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
737}; 831};
738MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); 832MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
739 833
834static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
835 ICPU(0x56, core_params),
836 {}
837};
838
740static int intel_pstate_init_cpu(unsigned int cpunum) 839static int intel_pstate_init_cpu(unsigned int cpunum)
741{ 840{
742 struct cpudata *cpu; 841 struct cpudata *cpu;
@@ -753,9 +852,14 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
753 intel_pstate_get_cpu_pstates(cpu); 852 intel_pstate_get_cpu_pstates(cpu);
754 853
755 init_timer_deferrable(&cpu->timer); 854 init_timer_deferrable(&cpu->timer);
756 cpu->timer.function = intel_pstate_timer_func;
757 cpu->timer.data = (unsigned long)cpu; 855 cpu->timer.data = (unsigned long)cpu;
758 cpu->timer.expires = jiffies + HZ/100; 856 cpu->timer.expires = jiffies + HZ/100;
857
858 if (!hwp_active)
859 cpu->timer.function = intel_pstate_timer_func;
860 else
861 cpu->timer.function = intel_hwp_timer_func;
862
759 intel_pstate_busy_pid_reset(cpu); 863 intel_pstate_busy_pid_reset(cpu);
760 intel_pstate_sample(cpu); 864 intel_pstate_sample(cpu);
761 865
@@ -792,6 +896,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
792 limits.no_turbo = 0; 896 limits.no_turbo = 0;
793 return 0; 897 return 0;
794 } 898 }
899
795 limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq; 900 limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
796 limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100); 901 limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
797 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); 902 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
@@ -801,6 +906,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
801 limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct); 906 limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
802 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); 907 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
803 908
909 if (hwp_active)
910 intel_pstate_hwp_set();
911
804 return 0; 912 return 0;
805} 913}
806 914
@@ -823,6 +931,9 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
823 pr_info("intel_pstate CPU %d exiting\n", cpu_num); 931 pr_info("intel_pstate CPU %d exiting\n", cpu_num);
824 932
825 del_timer_sync(&all_cpu_data[cpu_num]->timer); 933 del_timer_sync(&all_cpu_data[cpu_num]->timer);
934 if (hwp_active)
935 return;
936
826 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate); 937 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
827} 938}
828 939
@@ -866,6 +977,8 @@ static struct cpufreq_driver intel_pstate_driver = {
866}; 977};
867 978
868static int __initdata no_load; 979static int __initdata no_load;
980static int __initdata no_hwp;
981static unsigned int force_load;
869 982
870static int intel_pstate_msrs_not_valid(void) 983static int intel_pstate_msrs_not_valid(void)
871{ 984{
@@ -943,15 +1056,46 @@ static bool intel_pstate_no_acpi_pss(void)
943 return true; 1056 return true;
944} 1057}
945 1058
1059static bool intel_pstate_has_acpi_ppc(void)
1060{
1061 int i;
1062
1063 for_each_possible_cpu(i) {
1064 struct acpi_processor *pr = per_cpu(processors, i);
1065
1066 if (!pr)
1067 continue;
1068 if (acpi_has_method(pr->handle, "_PPC"))
1069 return true;
1070 }
1071 return false;
1072}
1073
1074enum {
1075 PSS,
1076 PPC,
1077};
1078
946struct hw_vendor_info { 1079struct hw_vendor_info {
947 u16 valid; 1080 u16 valid;
948 char oem_id[ACPI_OEM_ID_SIZE]; 1081 char oem_id[ACPI_OEM_ID_SIZE];
949 char oem_table_id[ACPI_OEM_TABLE_ID_SIZE]; 1082 char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
1083 int oem_pwr_table;
950}; 1084};
951 1085
952/* Hardware vendor-specific info that has its own power management modes */ 1086/* Hardware vendor-specific info that has its own power management modes */
953static struct hw_vendor_info vendor_info[] = { 1087static struct hw_vendor_info vendor_info[] = {
954 {1, "HP ", "ProLiant"}, 1088 {1, "HP ", "ProLiant", PSS},
1089 {1, "ORACLE", "X4-2 ", PPC},
1090 {1, "ORACLE", "X4-2L ", PPC},
1091 {1, "ORACLE", "X4-2B ", PPC},
1092 {1, "ORACLE", "X3-2 ", PPC},
1093 {1, "ORACLE", "X3-2L ", PPC},
1094 {1, "ORACLE", "X3-2B ", PPC},
1095 {1, "ORACLE", "X4470M2 ", PPC},
1096 {1, "ORACLE", "X4270M3 ", PPC},
1097 {1, "ORACLE", "X4270M2 ", PPC},
1098 {1, "ORACLE", "X4170M2 ", PPC},
955 {0, "", ""}, 1099 {0, "", ""},
956}; 1100};
957 1101
@@ -959,6 +1103,15 @@ static bool intel_pstate_platform_pwr_mgmt_exists(void)
959{ 1103{
960 struct acpi_table_header hdr; 1104 struct acpi_table_header hdr;
961 struct hw_vendor_info *v_info; 1105 struct hw_vendor_info *v_info;
1106 const struct x86_cpu_id *id;
1107 u64 misc_pwr;
1108
1109 id = x86_match_cpu(intel_pstate_cpu_oob_ids);
1110 if (id) {
1111 rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
1112 if ( misc_pwr & (1 << 8))
1113 return true;
1114 }
962 1115
963 if (acpi_disabled || 1116 if (acpi_disabled ||
964 ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr))) 1117 ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
@@ -966,15 +1119,22 @@ static bool intel_pstate_platform_pwr_mgmt_exists(void)
966 1119
967 for (v_info = vendor_info; v_info->valid; v_info++) { 1120 for (v_info = vendor_info; v_info->valid; v_info++) {
968 if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) && 1121 if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
969 !strncmp(hdr.oem_table_id, v_info->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) && 1122 !strncmp(hdr.oem_table_id, v_info->oem_table_id,
970 intel_pstate_no_acpi_pss()) 1123 ACPI_OEM_TABLE_ID_SIZE))
971 return true; 1124 switch (v_info->oem_pwr_table) {
1125 case PSS:
1126 return intel_pstate_no_acpi_pss();
1127 case PPC:
1128 return intel_pstate_has_acpi_ppc() &&
1129 (!force_load);
1130 }
972 } 1131 }
973 1132
974 return false; 1133 return false;
975} 1134}
976#else /* CONFIG_ACPI not enabled */ 1135#else /* CONFIG_ACPI not enabled */
977static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; } 1136static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
1137static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
978#endif /* CONFIG_ACPI */ 1138#endif /* CONFIG_ACPI */
979 1139
980static int __init intel_pstate_init(void) 1140static int __init intel_pstate_init(void)
@@ -982,6 +1142,7 @@ static int __init intel_pstate_init(void)
982 int cpu, rc = 0; 1142 int cpu, rc = 0;
983 const struct x86_cpu_id *id; 1143 const struct x86_cpu_id *id;
984 struct cpu_defaults *cpu_info; 1144 struct cpu_defaults *cpu_info;
1145 struct cpuinfo_x86 *c = &boot_cpu_data;
985 1146
986 if (no_load) 1147 if (no_load)
987 return -ENODEV; 1148 return -ENODEV;
@@ -1011,6 +1172,9 @@ static int __init intel_pstate_init(void)
1011 if (!all_cpu_data) 1172 if (!all_cpu_data)
1012 return -ENOMEM; 1173 return -ENOMEM;
1013 1174
1175 if (cpu_has(c,X86_FEATURE_HWP) && !no_hwp)
1176 intel_pstate_hwp_enable();
1177
1014 rc = cpufreq_register_driver(&intel_pstate_driver); 1178 rc = cpufreq_register_driver(&intel_pstate_driver);
1015 if (rc) 1179 if (rc)
1016 goto out; 1180 goto out;
@@ -1041,6 +1205,10 @@ static int __init intel_pstate_setup(char *str)
1041 1205
1042 if (!strcmp(str, "disable")) 1206 if (!strcmp(str, "disable"))
1043 no_load = 1; 1207 no_load = 1;
1208 if (!strcmp(str, "no_hwp"))
1209 no_hwp = 1;
1210 if (!strcmp(str, "force"))
1211 force_load = 1;
1044 return 0; 1212 return 0;
1045} 1213}
1046early_param("intel_pstate", intel_pstate_setup); 1214early_param("intel_pstate", intel_pstate_setup);
diff --git a/drivers/cpufreq/ls1x-cpufreq.c b/drivers/cpufreq/ls1x-cpufreq.c
new file mode 100644
index 000000000000..25fbd6a1374f
--- /dev/null
+++ b/drivers/cpufreq/ls1x-cpufreq.c
@@ -0,0 +1,223 @@
1/*
2 * CPU Frequency Scaling for Loongson 1 SoC
3 *
4 * Copyright (C) 2014 Zhang, Keguang <keguang.zhang@gmail.com>
5 *
6 * This file is licensed under the terms of the GNU General Public
7 * License version 2. This program is licensed "as is" without any
8 * warranty of any kind, whether express or implied.
9 */
10
11#include <linux/clk.h>
12#include <linux/clk-provider.h>
13#include <linux/cpu.h>
14#include <linux/cpufreq.h>
15#include <linux/delay.h>
16#include <linux/module.h>
17#include <linux/platform_device.h>
18#include <linux/slab.h>
19
20#include <asm/mach-loongson1/cpufreq.h>
21#include <asm/mach-loongson1/loongson1.h>
22
23static struct {
24 struct device *dev;
25 struct clk *clk; /* CPU clk */
26 struct clk *mux_clk; /* MUX of CPU clk */
27 struct clk *pll_clk; /* PLL clk */
28 struct clk *osc_clk; /* OSC clk */
29 unsigned int max_freq;
30 unsigned int min_freq;
31} ls1x_cpufreq;
32
33static int ls1x_cpufreq_notifier(struct notifier_block *nb,
34 unsigned long val, void *data)
35{
36 if (val == CPUFREQ_POSTCHANGE)
37 current_cpu_data.udelay_val = loops_per_jiffy;
38
39 return NOTIFY_OK;
40}
41
42static struct notifier_block ls1x_cpufreq_notifier_block = {
43 .notifier_call = ls1x_cpufreq_notifier
44};
45
46static int ls1x_cpufreq_target(struct cpufreq_policy *policy,
47 unsigned int index)
48{
49 unsigned int old_freq, new_freq;
50
51 old_freq = policy->cur;
52 new_freq = policy->freq_table[index].frequency;
53
54 /*
55 * The procedure of reconfiguring CPU clk is as below.
56 *
57 * - Reparent CPU clk to OSC clk
58 * - Reset CPU clock (very important)
59 * - Reconfigure CPU DIV
60 * - Reparent CPU clk back to CPU DIV clk
61 */
62
63 dev_dbg(ls1x_cpufreq.dev, "%u KHz --> %u KHz\n", old_freq, new_freq);
64 clk_set_parent(policy->clk, ls1x_cpufreq.osc_clk);
65 __raw_writel(__raw_readl(LS1X_CLK_PLL_DIV) | RST_CPU_EN | RST_CPU,
66 LS1X_CLK_PLL_DIV);
67 __raw_writel(__raw_readl(LS1X_CLK_PLL_DIV) & ~(RST_CPU_EN | RST_CPU),
68 LS1X_CLK_PLL_DIV);
69 clk_set_rate(ls1x_cpufreq.mux_clk, new_freq * 1000);
70 clk_set_parent(policy->clk, ls1x_cpufreq.mux_clk);
71
72 return 0;
73}
74
75static int ls1x_cpufreq_init(struct cpufreq_policy *policy)
76{
77 struct cpufreq_frequency_table *freq_tbl;
78 unsigned int pll_freq, freq;
79 int steps, i, ret;
80
81 pll_freq = clk_get_rate(ls1x_cpufreq.pll_clk) / 1000;
82
83 steps = 1 << DIV_CPU_WIDTH;
84 freq_tbl = kzalloc(sizeof(*freq_tbl) * steps, GFP_KERNEL);
85 if (!freq_tbl) {
86 dev_err(ls1x_cpufreq.dev,
87 "failed to alloc cpufreq_frequency_table\n");
88 ret = -ENOMEM;
89 goto out;
90 }
91
92 for (i = 0; i < (steps - 1); i++) {
93 freq = pll_freq / (i + 1);
94 if ((freq < ls1x_cpufreq.min_freq) ||
95 (freq > ls1x_cpufreq.max_freq))
96 freq_tbl[i].frequency = CPUFREQ_ENTRY_INVALID;
97 else
98 freq_tbl[i].frequency = freq;
99 dev_dbg(ls1x_cpufreq.dev,
100 "cpufreq table: index %d: frequency %d\n", i,
101 freq_tbl[i].frequency);
102 }
103 freq_tbl[i].frequency = CPUFREQ_TABLE_END;
104
105 policy->clk = ls1x_cpufreq.clk;
106 ret = cpufreq_generic_init(policy, freq_tbl, 0);
107 if (ret)
108 kfree(freq_tbl);
109out:
110 return ret;
111}
112
113static int ls1x_cpufreq_exit(struct cpufreq_policy *policy)
114{
115 kfree(policy->freq_table);
116 return 0;
117}
118
119static struct cpufreq_driver ls1x_cpufreq_driver = {
120 .name = "cpufreq-ls1x",
121 .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
122 .verify = cpufreq_generic_frequency_table_verify,
123 .target_index = ls1x_cpufreq_target,
124 .get = cpufreq_generic_get,
125 .init = ls1x_cpufreq_init,
126 .exit = ls1x_cpufreq_exit,
127 .attr = cpufreq_generic_attr,
128};
129
130static int ls1x_cpufreq_remove(struct platform_device *pdev)
131{
132 cpufreq_unregister_notifier(&ls1x_cpufreq_notifier_block,
133 CPUFREQ_TRANSITION_NOTIFIER);
134 cpufreq_unregister_driver(&ls1x_cpufreq_driver);
135
136 return 0;
137}
138
139static int ls1x_cpufreq_probe(struct platform_device *pdev)
140{
141 struct plat_ls1x_cpufreq *pdata = pdev->dev.platform_data;
142 struct clk *clk;
143 int ret;
144
145 if (!pdata || !pdata->clk_name || !pdata->osc_clk_name)
146 return -EINVAL;
147
148 ls1x_cpufreq.dev = &pdev->dev;
149
150 clk = devm_clk_get(&pdev->dev, pdata->clk_name);
151 if (IS_ERR(clk)) {
152 dev_err(ls1x_cpufreq.dev, "unable to get %s clock\n",
153 pdata->clk_name);
154 ret = PTR_ERR(clk);
155 goto out;
156 }
157 ls1x_cpufreq.clk = clk;
158
159 clk = clk_get_parent(clk);
160 if (IS_ERR(clk)) {
161 dev_err(ls1x_cpufreq.dev, "unable to get parent of %s clock\n",
162 __clk_get_name(ls1x_cpufreq.clk));
163 ret = PTR_ERR(clk);
164 goto out;
165 }
166 ls1x_cpufreq.mux_clk = clk;
167
168 clk = clk_get_parent(clk);
169 if (IS_ERR(clk)) {
170 dev_err(ls1x_cpufreq.dev, "unable to get parent of %s clock\n",
171 __clk_get_name(ls1x_cpufreq.mux_clk));
172 ret = PTR_ERR(clk);
173 goto out;
174 }
175 ls1x_cpufreq.pll_clk = clk;
176
177 clk = devm_clk_get(&pdev->dev, pdata->osc_clk_name);
178 if (IS_ERR(clk)) {
179 dev_err(ls1x_cpufreq.dev, "unable to get %s clock\n",
180 pdata->osc_clk_name);
181 ret = PTR_ERR(clk);
182 goto out;
183 }
184 ls1x_cpufreq.osc_clk = clk;
185
186 ls1x_cpufreq.max_freq = pdata->max_freq;
187 ls1x_cpufreq.min_freq = pdata->min_freq;
188
189 ret = cpufreq_register_driver(&ls1x_cpufreq_driver);
190 if (ret) {
191 dev_err(ls1x_cpufreq.dev,
192 "failed to register cpufreq driver: %d\n", ret);
193 goto out;
194 }
195
196 ret = cpufreq_register_notifier(&ls1x_cpufreq_notifier_block,
197 CPUFREQ_TRANSITION_NOTIFIER);
198
199 if (!ret)
200 goto out;
201
202 dev_err(ls1x_cpufreq.dev, "failed to register cpufreq notifier: %d\n",
203 ret);
204
205 cpufreq_unregister_driver(&ls1x_cpufreq_driver);
206out:
207 return ret;
208}
209
210static struct platform_driver ls1x_cpufreq_platdrv = {
211 .driver = {
212 .name = "ls1x-cpufreq",
213 .owner = THIS_MODULE,
214 },
215 .probe = ls1x_cpufreq_probe,
216 .remove = ls1x_cpufreq_remove,
217};
218
219module_platform_driver(ls1x_cpufreq_platdrv);
220
221MODULE_AUTHOR("Kelvin Cheung <keguang.zhang@gmail.com>");
222MODULE_DESCRIPTION("Loongson 1 CPUFreq driver");
223MODULE_LICENSE("GPL");
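The probe above takes its configuration from platform data rather than device tree: it needs clk_name, osc_clk_name, max_freq and min_freq, and binds to a platform device named "ls1x-cpufreq", so a board file has to register such a device. A minimal board-side sketch, assuming the plat_ls1x_cpufreq layout implied by the probe; the clock names and frequency limits below are illustrative assumptions, not values from this patch:

/* Hedged board-side sketch: field names follow the probe above; the clock
 * names and limits are assumptions. */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <asm/mach-loongson1/cpufreq.h>

static struct plat_ls1x_cpufreq ls1x_cpufreq_pdata = {
	.clk_name	= "cpu_clk",	/* assumed clock name */
	.osc_clk_name	= "osc_clk",	/* assumed clock name */
	.max_freq	= 266 * 1000,	/* kHz, illustrative */
	.min_freq	= 33 * 1000,	/* kHz, illustrative */
};

static int __init ls1x_add_cpufreq_device(void)
{
	/* Creates the "ls1x-cpufreq" device the driver above binds to. */
	return PTR_ERR_OR_ZERO(platform_device_register_data(NULL,
			"ls1x-cpufreq", -1, &ls1x_cpufreq_pdata,
			sizeof(ls1x_cpufreq_pdata)));
}
device_initcall(ls1x_add_cpufreq_device);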
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 4d2c8e861089..2a0d58959acf 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -603,6 +603,13 @@ static void __exit pcc_cpufreq_exit(void)
603 free_percpu(pcc_cpu_info); 603 free_percpu(pcc_cpu_info);
604} 604}
605 605
606static const struct acpi_device_id processor_device_ids[] = {
607 {ACPI_PROCESSOR_OBJECT_HID, },
608 {ACPI_PROCESSOR_DEVICE_HID, },
609 {},
610};
611MODULE_DEVICE_TABLE(acpi, processor_device_ids);
612
606MODULE_AUTHOR("Matthew Garrett, Naga Chumbalkar"); 613MODULE_AUTHOR("Matthew Garrett, Naga Chumbalkar");
607MODULE_VERSION(PCC_VERSION); 614MODULE_VERSION(PCC_VERSION);
608MODULE_DESCRIPTION("Processor Clocking Control interface driver"); 615MODULE_DESCRIPTION("Processor Clocking Control interface driver");
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f0a1a56406eb..8bcdb981d540 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -9408,6 +9408,10 @@ static bool page_flip_finished(struct intel_crtc *crtc)
9408 struct drm_device *dev = crtc->base.dev; 9408 struct drm_device *dev = crtc->base.dev;
9409 struct drm_i915_private *dev_priv = dev->dev_private; 9409 struct drm_i915_private *dev_priv = dev->dev_private;
9410 9410
9411 if (i915_reset_in_progress(&dev_priv->gpu_error) ||
9412 crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
9413 return true;
9414
9411 /* 9415 /*
9412 * The relevant registers doen't exist on pre-ctg. 9416 * The relevant registers doen't exist on pre-ctg.
9413 * As the flip done interrupt doesn't trigger for mmio 9417 * As the flip done interrupt doesn't trigger for mmio
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 5ad45bfff3fe..4bcd91757321 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -4450,6 +4450,7 @@ static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4450 * vdd might still be enabled do to the delayed vdd off. 4450 * vdd might still be enabled do to the delayed vdd off.
4451 * Make sure vdd is actually turned off here. 4451 * Make sure vdd is actually turned off here.
4452 */ 4452 */
4453 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4453 pps_lock(intel_dp); 4454 pps_lock(intel_dp);
4454 edp_panel_vdd_off_sync(intel_dp); 4455 edp_panel_vdd_off_sync(intel_dp);
4455 pps_unlock(intel_dp); 4456 pps_unlock(intel_dp);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 300c4b3d4669..26baa9c05f6c 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -322,6 +322,12 @@ static void radeon_connector_get_edid(struct drm_connector *connector)
322 } 322 }
323 323
324 if (!radeon_connector->edid) { 324 if (!radeon_connector->edid) {
325 /* don't fetch the edid from the vbios if ddc fails and runpm is
326 * enabled so we report disconnected.
327 */
328 if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0))
329 return;
330
325 if (rdev->is_atom_bios) { 331 if (rdev->is_atom_bios) {
326 /* some laptops provide a hardcoded edid in rom for LCDs */ 332 /* some laptops provide a hardcoded edid in rom for LCDs */
327 if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) || 333 if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) ||
@@ -826,6 +832,8 @@ static int radeon_lvds_mode_valid(struct drm_connector *connector,
826static enum drm_connector_status 832static enum drm_connector_status
827radeon_lvds_detect(struct drm_connector *connector, bool force) 833radeon_lvds_detect(struct drm_connector *connector, bool force)
828{ 834{
835 struct drm_device *dev = connector->dev;
836 struct radeon_device *rdev = dev->dev_private;
829 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 837 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
830 struct drm_encoder *encoder = radeon_best_single_encoder(connector); 838 struct drm_encoder *encoder = radeon_best_single_encoder(connector);
831 enum drm_connector_status ret = connector_status_disconnected; 839 enum drm_connector_status ret = connector_status_disconnected;
@@ -842,7 +850,11 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
842 /* check if panel is valid */ 850 /* check if panel is valid */
843 if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240) 851 if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240)
844 ret = connector_status_connected; 852 ret = connector_status_connected;
845 853 /* don't fetch the edid from the vbios if ddc fails and runpm is
854 * enabled so we report disconnected.
855 */
856 if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0))
857 ret = connector_status_disconnected;
846 } 858 }
847 859
848 /* check for edid as well */ 860 /* check for edid as well */
@@ -1589,6 +1601,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1589 /* check if panel is valid */ 1601 /* check if panel is valid */
1590 if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240) 1602 if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240)
1591 ret = connector_status_connected; 1603 ret = connector_status_connected;
1604 /* don't fetch the edid from the vbios if ddc fails and runpm is
1605 * enabled so we report disconnected.
1606 */
1607 if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0))
1608 ret = connector_status_disconnected;
1592 } 1609 }
1593 /* eDP is always DP */ 1610 /* eDP is always DP */
1594 radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; 1611 radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 7784911d78ef..00fc59762e0d 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -185,6 +185,16 @@ static bool radeon_msi_ok(struct radeon_device *rdev)
185 if (rdev->flags & RADEON_IS_AGP) 185 if (rdev->flags & RADEON_IS_AGP)
186 return false; 186 return false;
187 187
188 /*
189 * Older chips have a HW limitation, they can only generate 40 bits
190 * of address for "64-bit" MSIs which breaks on some platforms, notably
191 * IBM POWER servers, so we limit them
192 */
193 if (rdev->family < CHIP_BONAIRE) {
194 dev_info(rdev->dev, "radeon: MSI limited to 32-bit\n");
195 rdev->pdev->no_64bit_msi = 1;
196 }
197
188 /* force MSI on */ 198 /* force MSI on */
189 if (radeon_msi == 1) 199 if (radeon_msi == 1)
190 return true; 200 return true;
diff --git a/drivers/hwmon/g762.c b/drivers/hwmon/g762.c
index 6aac695b1688..9b55e673b67c 100644
--- a/drivers/hwmon/g762.c
+++ b/drivers/hwmon/g762.c
@@ -1084,10 +1084,8 @@ static int g762_probe(struct i2c_client *client, const struct i2c_device_id *id)
1084 if (ret) 1084 if (ret)
1085 goto clock_dis; 1085 goto clock_dis;
1086 1086
1087 data->hwmon_dev = devm_hwmon_device_register_with_groups(dev, 1087 data->hwmon_dev = hwmon_device_register_with_groups(dev, client->name,
1088 client->name, 1088 data, g762_groups);
1089 data,
1090 g762_groups);
1091 if (IS_ERR(data->hwmon_dev)) { 1089 if (IS_ERR(data->hwmon_dev)) {
1092 ret = PTR_ERR(data->hwmon_dev); 1090 ret = PTR_ERR(data->hwmon_dev);
1093 goto clock_dis; 1091 goto clock_dis;
diff --git a/drivers/iio/accel/bmc150-accel.c b/drivers/iio/accel/bmc150-accel.c
index 22c096ce39ad..513bd6d14293 100644
--- a/drivers/iio/accel/bmc150-accel.c
+++ b/drivers/iio/accel/bmc150-accel.c
@@ -44,6 +44,9 @@
44 44
45#define BMC150_ACCEL_REG_INT_STATUS_2 0x0B 45#define BMC150_ACCEL_REG_INT_STATUS_2 0x0B
46#define BMC150_ACCEL_ANY_MOTION_MASK 0x07 46#define BMC150_ACCEL_ANY_MOTION_MASK 0x07
47#define BMC150_ACCEL_ANY_MOTION_BIT_X BIT(0)
48#define BMC150_ACCEL_ANY_MOTION_BIT_Y BIT(1)
49#define BMC150_ACCEL_ANY_MOTION_BIT_Z BIT(2)
47#define BMC150_ACCEL_ANY_MOTION_BIT_SIGN BIT(3) 50#define BMC150_ACCEL_ANY_MOTION_BIT_SIGN BIT(3)
48 51
49#define BMC150_ACCEL_REG_PMU_LPW 0x11 52#define BMC150_ACCEL_REG_PMU_LPW 0x11
@@ -92,9 +95,9 @@
92#define BMC150_ACCEL_SLOPE_THRES_MASK 0xFF 95#define BMC150_ACCEL_SLOPE_THRES_MASK 0xFF
93 96
94/* Slope duration in terms of number of samples */ 97/* Slope duration in terms of number of samples */
95#define BMC150_ACCEL_DEF_SLOPE_DURATION 2 98#define BMC150_ACCEL_DEF_SLOPE_DURATION 1
96/* in terms of multiples of g's/LSB, based on range */ 99/* in terms of multiples of g's/LSB, based on range */
97#define BMC150_ACCEL_DEF_SLOPE_THRESHOLD 5 100#define BMC150_ACCEL_DEF_SLOPE_THRESHOLD 1
98 101
99#define BMC150_ACCEL_REG_XOUT_L 0x02 102#define BMC150_ACCEL_REG_XOUT_L 0x02
100 103
@@ -536,6 +539,9 @@ static int bmc150_accel_set_power_state(struct bmc150_accel_data *data, bool on)
536 if (ret < 0) { 539 if (ret < 0) {
537 dev_err(&data->client->dev, 540 dev_err(&data->client->dev,
538 "Failed: bmc150_accel_set_power_state for %d\n", on); 541 "Failed: bmc150_accel_set_power_state for %d\n", on);
542 if (on)
543 pm_runtime_put_noidle(&data->client->dev);
544
539 return ret; 545 return ret;
540 } 546 }
541 547
@@ -811,6 +817,7 @@ static int bmc150_accel_write_event_config(struct iio_dev *indio_dev,
811 817
812 ret = bmc150_accel_setup_any_motion_interrupt(data, state); 818 ret = bmc150_accel_setup_any_motion_interrupt(data, state);
813 if (ret < 0) { 819 if (ret < 0) {
820 bmc150_accel_set_power_state(data, false);
814 mutex_unlock(&data->mutex); 821 mutex_unlock(&data->mutex);
815 return ret; 822 return ret;
816 } 823 }
@@ -846,7 +853,7 @@ static const struct attribute_group bmc150_accel_attrs_group = {
846 853
847static const struct iio_event_spec bmc150_accel_event = { 854static const struct iio_event_spec bmc150_accel_event = {
848 .type = IIO_EV_TYPE_ROC, 855 .type = IIO_EV_TYPE_ROC,
849 .dir = IIO_EV_DIR_RISING | IIO_EV_DIR_FALLING, 856 .dir = IIO_EV_DIR_EITHER,
850 .mask_separate = BIT(IIO_EV_INFO_VALUE) | 857 .mask_separate = BIT(IIO_EV_INFO_VALUE) |
851 BIT(IIO_EV_INFO_ENABLE) | 858 BIT(IIO_EV_INFO_ENABLE) |
852 BIT(IIO_EV_INFO_PERIOD) 859 BIT(IIO_EV_INFO_PERIOD)
@@ -1054,6 +1061,7 @@ static int bmc150_accel_data_rdy_trigger_set_state(struct iio_trigger *trig,
1054 else 1061 else
1055 ret = bmc150_accel_setup_new_data_interrupt(data, state); 1062 ret = bmc150_accel_setup_new_data_interrupt(data, state);
1056 if (ret < 0) { 1063 if (ret < 0) {
1064 bmc150_accel_set_power_state(data, false);
1057 mutex_unlock(&data->mutex); 1065 mutex_unlock(&data->mutex);
1058 return ret; 1066 return ret;
1059 } 1067 }
@@ -1092,12 +1100,26 @@ static irqreturn_t bmc150_accel_event_handler(int irq, void *private)
1092 else 1100 else
1093 dir = IIO_EV_DIR_RISING; 1101 dir = IIO_EV_DIR_RISING;
1094 1102
1095 if (ret & BMC150_ACCEL_ANY_MOTION_MASK) 1103 if (ret & BMC150_ACCEL_ANY_MOTION_BIT_X)
1104 iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL,
1105 0,
1106 IIO_MOD_X,
1107 IIO_EV_TYPE_ROC,
1108 dir),
1109 data->timestamp);
1110 if (ret & BMC150_ACCEL_ANY_MOTION_BIT_Y)
1096 iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL, 1111 iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL,
1097 0, 1112 0,
1098 IIO_MOD_X_OR_Y_OR_Z, 1113 IIO_MOD_Y,
1099 IIO_EV_TYPE_ROC, 1114 IIO_EV_TYPE_ROC,
1100 IIO_EV_DIR_EITHER), 1115 dir),
1116 data->timestamp);
1117 if (ret & BMC150_ACCEL_ANY_MOTION_BIT_Z)
1118 iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL,
1119 0,
1120 IIO_MOD_Z,
1121 IIO_EV_TYPE_ROC,
1122 dir),
1101 data->timestamp); 1123 data->timestamp);
1102ack_intr_status: 1124ack_intr_status:
1103 if (!data->dready_trigger_on) 1125 if (!data->dready_trigger_on)
@@ -1354,10 +1376,14 @@ static int bmc150_accel_runtime_suspend(struct device *dev)
1354{ 1376{
1355 struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); 1377 struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
1356 struct bmc150_accel_data *data = iio_priv(indio_dev); 1378 struct bmc150_accel_data *data = iio_priv(indio_dev);
1379 int ret;
1357 1380
1358 dev_dbg(&data->client->dev, __func__); 1381 dev_dbg(&data->client->dev, __func__);
1382 ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_SUSPEND, 0);
1383 if (ret < 0)
1384 return -EAGAIN;
1359 1385
1360 return bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_SUSPEND, 0); 1386 return 0;
1361} 1387}
1362 1388
1363static int bmc150_accel_runtime_resume(struct device *dev) 1389static int bmc150_accel_runtime_resume(struct device *dev)
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index a23e58c4ed99..320aa72c0349 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -269,6 +269,8 @@ static int kxcjk1013_set_range(struct kxcjk1013_data *data, int range_index)
269 return ret; 269 return ret;
270 } 270 }
271 271
272 ret &= ~(KXCJK1013_REG_CTRL1_BIT_GSEL0 |
273 KXCJK1013_REG_CTRL1_BIT_GSEL1);
272 ret |= (KXCJK1013_scale_table[range_index].gsel_0 << 3); 274 ret |= (KXCJK1013_scale_table[range_index].gsel_0 << 3);
273 ret |= (KXCJK1013_scale_table[range_index].gsel_1 << 4); 275 ret |= (KXCJK1013_scale_table[range_index].gsel_1 << 4);
274 276
diff --git a/drivers/iio/adc/men_z188_adc.c b/drivers/iio/adc/men_z188_adc.c
index b58d6302521f..d095efe1ba14 100644
--- a/drivers/iio/adc/men_z188_adc.c
+++ b/drivers/iio/adc/men_z188_adc.c
@@ -152,6 +152,7 @@ static void men_z188_remove(struct mcb_device *dev)
152 152
153static const struct mcb_device_id men_z188_ids[] = { 153static const struct mcb_device_id men_z188_ids[] = {
154 { .device = 0xbc }, 154 { .device = 0xbc },
155 { }
155}; 156};
156MODULE_DEVICE_TABLE(mcb, men_z188_ids); 157MODULE_DEVICE_TABLE(mcb, men_z188_ids);
157 158
diff --git a/drivers/iio/gyro/bmg160.c b/drivers/iio/gyro/bmg160.c
index 1f967e0d688e..d2fa526740ca 100644
--- a/drivers/iio/gyro/bmg160.c
+++ b/drivers/iio/gyro/bmg160.c
@@ -67,6 +67,9 @@
67#define BMG160_REG_INT_EN_0 0x15 67#define BMG160_REG_INT_EN_0 0x15
68#define BMG160_DATA_ENABLE_INT BIT(7) 68#define BMG160_DATA_ENABLE_INT BIT(7)
69 69
70#define BMG160_REG_INT_EN_1 0x16
71#define BMG160_INT1_BIT_OD BIT(1)
72
70#define BMG160_REG_XOUT_L 0x02 73#define BMG160_REG_XOUT_L 0x02
71#define BMG160_AXIS_TO_REG(axis) (BMG160_REG_XOUT_L + (axis * 2)) 74#define BMG160_AXIS_TO_REG(axis) (BMG160_REG_XOUT_L + (axis * 2))
72 75
@@ -82,6 +85,9 @@
82 85
83#define BMG160_REG_INT_STATUS_2 0x0B 86#define BMG160_REG_INT_STATUS_2 0x0B
84#define BMG160_ANY_MOTION_MASK 0x07 87#define BMG160_ANY_MOTION_MASK 0x07
88#define BMG160_ANY_MOTION_BIT_X BIT(0)
89#define BMG160_ANY_MOTION_BIT_Y BIT(1)
90#define BMG160_ANY_MOTION_BIT_Z BIT(2)
85 91
86#define BMG160_REG_TEMP 0x08 92#define BMG160_REG_TEMP 0x08
87#define BMG160_TEMP_CENTER_VAL 23 93#define BMG160_TEMP_CENTER_VAL 23
@@ -222,6 +228,19 @@ static int bmg160_chip_init(struct bmg160_data *data)
222 data->slope_thres = ret; 228 data->slope_thres = ret;
223 229
224 /* Set default interrupt mode */ 230 /* Set default interrupt mode */
231 ret = i2c_smbus_read_byte_data(data->client, BMG160_REG_INT_EN_1);
232 if (ret < 0) {
233 dev_err(&data->client->dev, "Error reading reg_int_en_1\n");
234 return ret;
235 }
236 ret &= ~BMG160_INT1_BIT_OD;
237 ret = i2c_smbus_write_byte_data(data->client,
238 BMG160_REG_INT_EN_1, ret);
239 if (ret < 0) {
240 dev_err(&data->client->dev, "Error writing reg_int_en_1\n");
241 return ret;
242 }
243
225 ret = i2c_smbus_write_byte_data(data->client, 244 ret = i2c_smbus_write_byte_data(data->client,
226 BMG160_REG_INT_RST_LATCH, 245 BMG160_REG_INT_RST_LATCH,
227 BMG160_INT_MODE_LATCH_INT | 246 BMG160_INT_MODE_LATCH_INT |
@@ -250,6 +269,9 @@ static int bmg160_set_power_state(struct bmg160_data *data, bool on)
250 if (ret < 0) { 269 if (ret < 0) {
251 dev_err(&data->client->dev, 270 dev_err(&data->client->dev,
252 "Failed: bmg160_set_power_state for %d\n", on); 271 "Failed: bmg160_set_power_state for %d\n", on);
272 if (on)
273 pm_runtime_put_noidle(&data->client->dev);
274
253 return ret; 275 return ret;
254 } 276 }
255#endif 277#endif
@@ -705,6 +727,7 @@ static int bmg160_write_event_config(struct iio_dev *indio_dev,
705 727
706 ret = bmg160_setup_any_motion_interrupt(data, state); 728 ret = bmg160_setup_any_motion_interrupt(data, state);
707 if (ret < 0) { 729 if (ret < 0) {
730 bmg160_set_power_state(data, false);
708 mutex_unlock(&data->mutex); 731 mutex_unlock(&data->mutex);
709 return ret; 732 return ret;
710 } 733 }
@@ -743,7 +766,7 @@ static const struct attribute_group bmg160_attrs_group = {
743 766
744static const struct iio_event_spec bmg160_event = { 767static const struct iio_event_spec bmg160_event = {
745 .type = IIO_EV_TYPE_ROC, 768 .type = IIO_EV_TYPE_ROC,
746 .dir = IIO_EV_DIR_RISING | IIO_EV_DIR_FALLING, 769 .dir = IIO_EV_DIR_EITHER,
747 .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) | 770 .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) |
748 BIT(IIO_EV_INFO_ENABLE) 771 BIT(IIO_EV_INFO_ENABLE)
749}; 772};
@@ -871,6 +894,7 @@ static int bmg160_data_rdy_trigger_set_state(struct iio_trigger *trig,
871 else 894 else
872 ret = bmg160_setup_new_data_interrupt(data, state); 895 ret = bmg160_setup_new_data_interrupt(data, state);
873 if (ret < 0) { 896 if (ret < 0) {
897 bmg160_set_power_state(data, false);
874 mutex_unlock(&data->mutex); 898 mutex_unlock(&data->mutex);
875 return ret; 899 return ret;
876 } 900 }
@@ -908,10 +932,24 @@ static irqreturn_t bmg160_event_handler(int irq, void *private)
908 else 932 else
909 dir = IIO_EV_DIR_FALLING; 933 dir = IIO_EV_DIR_FALLING;
910 934
911 if (ret & BMG160_ANY_MOTION_MASK) 935 if (ret & BMG160_ANY_MOTION_BIT_X)
912 iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL, 936 iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL,
913 0, 937 0,
914 IIO_MOD_X_OR_Y_OR_Z, 938 IIO_MOD_X,
939 IIO_EV_TYPE_ROC,
940 dir),
941 data->timestamp);
942 if (ret & BMG160_ANY_MOTION_BIT_Y)
943 iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL,
944 0,
945 IIO_MOD_Y,
946 IIO_EV_TYPE_ROC,
947 dir),
948 data->timestamp);
949 if (ret & BMG160_ANY_MOTION_BIT_Z)
950 iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL,
951 0,
952 IIO_MOD_Z,
915 IIO_EV_TYPE_ROC, 953 IIO_EV_TYPE_ROC,
916 dir), 954 dir),
917 data->timestamp); 955 data->timestamp);
@@ -1169,8 +1207,15 @@ static int bmg160_runtime_suspend(struct device *dev)
1169{ 1207{
1170 struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); 1208 struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
1171 struct bmg160_data *data = iio_priv(indio_dev); 1209 struct bmg160_data *data = iio_priv(indio_dev);
1210 int ret;
1211
1212 ret = bmg160_set_mode(data, BMG160_MODE_SUSPEND);
1213 if (ret < 0) {
1214 dev_err(&data->client->dev, "set mode failed\n");
1215 return -EAGAIN;
1216 }
1172 1217
1173 return bmg160_set_mode(data, BMG160_MODE_SUSPEND); 1218 return 0;
1174} 1219}
1175 1220
1176static int bmg160_runtime_resume(struct device *dev) 1221static int bmg160_runtime_resume(struct device *dev)
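Both the bmc150-accel and bmg160 event handlers above now decode INT_STATUS_2 per axis and push one event per triggered axis, tagged IIO_MOD_X/Y/Z with a rising or falling direction, instead of a single IIO_MOD_X_OR_Y_OR_Z event with IIO_EV_DIR_EITHER. User space sees the difference through the standard IIO event character device. A hedged user-space sketch, assuming the usual /dev/iio:deviceN node (the node number is an assumption) and the uapi helpers from <linux/iio/events.h>:

/* Hedged user-space sketch: the device path is an assumption. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/iio/events.h>

int main(void)
{
	struct iio_event_data ev;
	int fd, evfd;

	fd = open("/dev/iio:device0", O_RDONLY);	/* assumed node */
	if (fd < 0 || ioctl(fd, IIO_GET_EVENT_FD_IOCTL, &evfd) < 0)
		return 1;

	while (read(evfd, &ev, sizeof(ev)) == sizeof(ev)) {
		/* With this patch the modifier identifies the axis
		 * (IIO_MOD_X/Y/Z) and the direction is rising or falling,
		 * rather than one X-or-Y-or-Z / either-direction code. */
		printf("chan_type %llu mod %llu dir %llu ts %lld\n",
		       (unsigned long long)IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(ev.id),
		       (unsigned long long)IIO_EVENT_CODE_EXTRACT_MODIFIER(ev.id),
		       (unsigned long long)IIO_EVENT_CODE_EXTRACT_DIR(ev.id),
		       (long long)ev.timestamp);
	}
	return 0;
}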
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 2ed7905a068f..fc55f0d15b70 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -1179,9 +1179,19 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
1179 } 1179 }
1180 1180
1181 ep_irq_in = &intf->cur_altsetting->endpoint[1].desc; 1181 ep_irq_in = &intf->cur_altsetting->endpoint[1].desc;
1182 usb_fill_bulk_urb(xpad->bulk_out, udev, 1182 if (usb_endpoint_is_bulk_out(ep_irq_in)) {
1183 usb_sndbulkpipe(udev, ep_irq_in->bEndpointAddress), 1183 usb_fill_bulk_urb(xpad->bulk_out, udev,
1184 xpad->bdata, XPAD_PKT_LEN, xpad_bulk_out, xpad); 1184 usb_sndbulkpipe(udev,
1185 ep_irq_in->bEndpointAddress),
1186 xpad->bdata, XPAD_PKT_LEN,
1187 xpad_bulk_out, xpad);
1188 } else {
1189 usb_fill_int_urb(xpad->bulk_out, udev,
1190 usb_sndintpipe(udev,
1191 ep_irq_in->bEndpointAddress),
1192 xpad->bdata, XPAD_PKT_LEN,
1193 xpad_bulk_out, xpad, 0);
1194 }
1185 1195
1186 /* 1196 /*
1187 * Submit the int URB immediately rather than waiting for open 1197 * Submit the int URB immediately rather than waiting for open
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 3fcb6b3cb0bd..f2b978026407 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -428,14 +428,6 @@ static void elantech_report_trackpoint(struct psmouse *psmouse,
428 int x, y; 428 int x, y;
429 u32 t; 429 u32 t;
430 430
431 if (dev_WARN_ONCE(&psmouse->ps2dev.serio->dev,
432 !tp_dev,
433 psmouse_fmt("Unexpected trackpoint message\n"))) {
434 if (etd->debug == 1)
435 elantech_packet_dump(psmouse);
436 return;
437 }
438
439 t = get_unaligned_le32(&packet[0]); 431 t = get_unaligned_le32(&packet[0]);
440 432
441 switch (t & ~7U) { 433 switch (t & ~7U) {
@@ -793,7 +785,7 @@ static int elantech_packet_check_v4(struct psmouse *psmouse)
793 unsigned char packet_type = packet[3] & 0x03; 785 unsigned char packet_type = packet[3] & 0x03;
794 bool sanity_check; 786 bool sanity_check;
795 787
796 if ((packet[3] & 0x0f) == 0x06) 788 if (etd->tp_dev && (packet[3] & 0x0f) == 0x06)
797 return PACKET_TRACKPOINT; 789 return PACKET_TRACKPOINT;
798 790
799 /* 791 /*
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 2a7a9174c702..f9472920d986 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -143,6 +143,10 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
143 (const char * const []){"LEN2001", NULL}, 143 (const char * const []){"LEN2001", NULL},
144 1024, 5022, 2508, 4832 144 1024, 5022, 2508, 4832
145 }, 145 },
146 {
147 (const char * const []){"LEN2006", NULL},
148 1264, 5675, 1171, 4688
149 },
146 { } 150 { }
147}; 151};
148 152
diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
index 6ae3cdee0681..cc4f9d80122e 100644
--- a/drivers/irqchip/irq-atmel-aic-common.c
+++ b/drivers/irqchip/irq-atmel-aic-common.c
@@ -217,8 +217,9 @@ struct irq_domain *__init aic_common_of_init(struct device_node *node,
217 } 217 }
218 218
219 ret = irq_alloc_domain_generic_chips(domain, 32, 1, name, 219 ret = irq_alloc_domain_generic_chips(domain, 32, 1, name,
220 handle_level_irq, 0, 0, 220 handle_fasteoi_irq,
221 IRQCHIP_SKIP_SET_WAKE); 221 IRQ_NOREQUEST | IRQ_NOPROBE |
222 IRQ_NOAUTOEN, 0, 0);
222 if (ret) 223 if (ret)
223 goto err_domain_remove; 224 goto err_domain_remove;
224 225
@@ -230,7 +231,6 @@ struct irq_domain *__init aic_common_of_init(struct device_node *node,
230 gc->unused = 0; 231 gc->unused = 0;
231 gc->wake_enabled = ~0; 232 gc->wake_enabled = ~0;
232 gc->chip_types[0].type = IRQ_TYPE_SENSE_MASK; 233 gc->chip_types[0].type = IRQ_TYPE_SENSE_MASK;
233 gc->chip_types[0].handler = handle_fasteoi_irq;
234 gc->chip_types[0].chip.irq_eoi = irq_gc_eoi; 234 gc->chip_types[0].chip.irq_eoi = irq_gc_eoi;
235 gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake; 235 gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake;
236 gc->chip_types[0].chip.irq_shutdown = aic_common_shutdown; 236 gc->chip_types[0].chip.irq_shutdown = aic_common_shutdown;
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
index b9f4fb808e49..5fb38a2ac226 100644
--- a/drivers/irqchip/irq-bcm7120-l2.c
+++ b/drivers/irqchip/irq-bcm7120-l2.c
@@ -101,9 +101,9 @@ static int bcm7120_l2_intc_init_one(struct device_node *dn,
101 int parent_irq; 101 int parent_irq;
102 102
103 parent_irq = irq_of_parse_and_map(dn, irq); 103 parent_irq = irq_of_parse_and_map(dn, irq);
104 if (parent_irq < 0) { 104 if (!parent_irq) {
105 pr_err("failed to map interrupt %d\n", irq); 105 pr_err("failed to map interrupt %d\n", irq);
106 return parent_irq; 106 return -EINVAL;
107 } 107 }
108 108
109 data->irq_map_mask |= be32_to_cpup(map_mask + irq); 109 data->irq_map_mask |= be32_to_cpup(map_mask + irq);
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index c15c840987d2..14691a4cb84c 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -135,9 +135,9 @@ int __init brcmstb_l2_intc_of_init(struct device_node *np,
135 __raw_writel(0xffffffff, data->base + CPU_CLEAR); 135 __raw_writel(0xffffffff, data->base + CPU_CLEAR);
136 136
137 data->parent_irq = irq_of_parse_and_map(np, 0); 137 data->parent_irq = irq_of_parse_and_map(np, 0);
138 if (data->parent_irq < 0) { 138 if (!data->parent_irq) {
139 pr_err("failed to find parent interrupt\n"); 139 pr_err("failed to find parent interrupt\n");
140 ret = data->parent_irq; 140 ret = -EINVAL;
141 goto out_unmap; 141 goto out_unmap;
142 } 142 }
143 143
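Both the bcm7120-l2 and brcmstb-l2 fixes above hinge on the same detail: irq_of_parse_and_map() returns an unsigned virq and yields 0, not a negative errno, when no mapping can be created, so the old `< 0` checks could never trigger. A minimal sketch of the corrected pattern (the helper name is hypothetical):

/* Hedged sketch: foo_get_parent_irq() is hypothetical and only shows the
 * zero-means-failure convention of irq_of_parse_and_map(). */
#include <linux/of_irq.h>

static int foo_get_parent_irq(struct device_node *np)
{
	unsigned int virq = irq_of_parse_and_map(np, 0);

	return virq ? (int)virq : -EINVAL;
}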
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index b9625968daac..4f4c2a7888e5 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -377,6 +377,29 @@ static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
377 return IRQ_HANDLED; 377 return IRQ_HANDLED;
378} 378}
379 379
380static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
381{
382 unsigned int timeout = 1000;
383 u32 reg;
384
385 reg = core_readl(priv, CORE_WATCHDOG_CTRL);
386 reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
387 core_writel(priv, reg, CORE_WATCHDOG_CTRL);
388
389 do {
390 reg = core_readl(priv, CORE_WATCHDOG_CTRL);
391 if (!(reg & SOFTWARE_RESET))
392 break;
393
394 usleep_range(1000, 2000);
395 } while (timeout-- > 0);
396
397 if (timeout == 0)
398 return -ETIMEDOUT;
399
400 return 0;
401}
402
380static int bcm_sf2_sw_setup(struct dsa_switch *ds) 403static int bcm_sf2_sw_setup(struct dsa_switch *ds)
381{ 404{
382 const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME; 405 const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
@@ -404,11 +427,18 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
404 *base = of_iomap(dn, i); 427 *base = of_iomap(dn, i);
405 if (*base == NULL) { 428 if (*base == NULL) {
406 pr_err("unable to find register: %s\n", reg_names[i]); 429 pr_err("unable to find register: %s\n", reg_names[i]);
407 return -ENODEV; 430 ret = -ENOMEM;
431 goto out_unmap;
408 } 432 }
409 base++; 433 base++;
410 } 434 }
411 435
436 ret = bcm_sf2_sw_rst(priv);
437 if (ret) {
438 pr_err("unable to software reset switch: %d\n", ret);
439 goto out_unmap;
440 }
441
412 /* Disable all interrupts and request them */ 442 /* Disable all interrupts and request them */
413 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET); 443 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
414 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); 444 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
@@ -484,7 +514,8 @@ out_free_irq0:
484out_unmap: 514out_unmap:
485 base = &priv->core; 515 base = &priv->core;
486 for (i = 0; i < BCM_SF2_REGS_NUM; i++) { 516 for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
487 iounmap(*base); 517 if (*base)
518 iounmap(*base);
488 base++; 519 base++;
489 } 520 }
490 return ret; 521 return ret;
@@ -733,29 +764,6 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
733 return 0; 764 return 0;
734} 765}
735 766
736static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
737{
738 unsigned int timeout = 1000;
739 u32 reg;
740
741 reg = core_readl(priv, CORE_WATCHDOG_CTRL);
742 reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
743 core_writel(priv, reg, CORE_WATCHDOG_CTRL);
744
745 do {
746 reg = core_readl(priv, CORE_WATCHDOG_CTRL);
747 if (!(reg & SOFTWARE_RESET))
748 break;
749
750 usleep_range(1000, 2000);
751 } while (timeout-- > 0);
752
753 if (timeout == 0)
754 return -ETIMEDOUT;
755
756 return 0;
757}
758
759static int bcm_sf2_sw_resume(struct dsa_switch *ds) 767static int bcm_sf2_sw_resume(struct dsa_switch *ds)
760{ 768{
761 struct bcm_sf2_priv *priv = ds_to_priv(ds); 769 struct bcm_sf2_priv *priv = ds_to_priv(ds);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index dbb41c1923e6..77f8f836cbbe 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -8563,7 +8563,8 @@ static int tg3_init_rings(struct tg3 *tp)
8563 if (tnapi->rx_rcb) 8563 if (tnapi->rx_rcb)
8564 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); 8564 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8565 8565
8566 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) { 8566 if (tnapi->prodring.rx_std &&
8567 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8567 tg3_free_rings(tp); 8568 tg3_free_rings(tp);
8568 return -ENOMEM; 8569 return -ENOMEM;
8569 } 8570 }
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 3e8475cae4f9..597c463e384d 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4309,11 +4309,16 @@ static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
4309 return -EOPNOTSUPP; 4309 return -EOPNOTSUPP;
4310 4310
4311 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 4311 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4312 if (!br_spec)
4313 return -EINVAL;
4312 4314
4313 nla_for_each_nested(attr, br_spec, rem) { 4315 nla_for_each_nested(attr, br_spec, rem) {
4314 if (nla_type(attr) != IFLA_BRIDGE_MODE) 4316 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4315 continue; 4317 continue;
4316 4318
4319 if (nla_len(attr) < sizeof(mode))
4320 return -EINVAL;
4321
4317 mode = nla_get_u16(attr); 4322 mode = nla_get_u16(attr);
4318 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB) 4323 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4319 return -EINVAL; 4324 return -EINVAL;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index a2d72a87cbde..487cd9c4ac0d 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1012,7 +1012,8 @@ static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
1012 /* igb_get_stats64() might access the rings on this vector, 1012 /* igb_get_stats64() might access the rings on this vector,
1013 * we must wait a grace period before freeing it. 1013 * we must wait a grace period before freeing it.
1014 */ 1014 */
1015 kfree_rcu(q_vector, rcu); 1015 if (q_vector)
1016 kfree_rcu(q_vector, rcu);
1016} 1017}
1017 1018
1018/** 1019/**
@@ -1792,8 +1793,10 @@ void igb_down(struct igb_adapter *adapter)
1792 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; 1793 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
1793 1794
1794 for (i = 0; i < adapter->num_q_vectors; i++) { 1795 for (i = 0; i < adapter->num_q_vectors; i++) {
1795 napi_synchronize(&(adapter->q_vector[i]->napi)); 1796 if (adapter->q_vector[i]) {
1796 napi_disable(&(adapter->q_vector[i]->napi)); 1797 napi_synchronize(&adapter->q_vector[i]->napi);
1798 napi_disable(&adapter->q_vector[i]->napi);
1799 }
1797 } 1800 }
1798 1801
1799 1802
@@ -3717,7 +3720,8 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
3717 int i; 3720 int i;
3718 3721
3719 for (i = 0; i < adapter->num_tx_queues; i++) 3722 for (i = 0; i < adapter->num_tx_queues; i++)
3720 igb_free_tx_resources(adapter->tx_ring[i]); 3723 if (adapter->tx_ring[i])
3724 igb_free_tx_resources(adapter->tx_ring[i]);
3721} 3725}
3722 3726
3723void igb_unmap_and_free_tx_resource(struct igb_ring *ring, 3727void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
@@ -3782,7 +3786,8 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
3782 int i; 3786 int i;
3783 3787
3784 for (i = 0; i < adapter->num_tx_queues; i++) 3788 for (i = 0; i < adapter->num_tx_queues; i++)
3785 igb_clean_tx_ring(adapter->tx_ring[i]); 3789 if (adapter->tx_ring[i])
3790 igb_clean_tx_ring(adapter->tx_ring[i]);
3786} 3791}
3787 3792
3788/** 3793/**
@@ -3819,7 +3824,8 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
3819 int i; 3824 int i;
3820 3825
3821 for (i = 0; i < adapter->num_rx_queues; i++) 3826 for (i = 0; i < adapter->num_rx_queues; i++)
3822 igb_free_rx_resources(adapter->rx_ring[i]); 3827 if (adapter->rx_ring[i])
3828 igb_free_rx_resources(adapter->rx_ring[i]);
3823} 3829}
3824 3830
3825/** 3831/**
@@ -3874,7 +3880,8 @@ static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
3874 int i; 3880 int i;
3875 3881
3876 for (i = 0; i < adapter->num_rx_queues; i++) 3882 for (i = 0; i < adapter->num_rx_queues; i++)
3877 igb_clean_rx_ring(adapter->rx_ring[i]); 3883 if (adapter->rx_ring[i])
3884 igb_clean_rx_ring(adapter->rx_ring[i]);
3878} 3885}
3879 3886
3880/** 3887/**
@@ -7404,6 +7411,8 @@ static int igb_resume(struct device *dev)
7404 pci_restore_state(pdev); 7411 pci_restore_state(pdev);
7405 pci_save_state(pdev); 7412 pci_save_state(pdev);
7406 7413
7414 if (!pci_device_is_present(pdev))
7415 return -ENODEV;
7407 err = pci_enable_device_mem(pdev); 7416 err = pci_enable_device_mem(pdev);
7408 if (err) { 7417 if (err) {
7409 dev_err(&pdev->dev, 7418 dev_err(&pdev->dev,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index d2df4e3d1032..cc51554c9e99 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3936,8 +3936,8 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
3936 * if SR-IOV and VMDQ are disabled - otherwise ensure 3936 * if SR-IOV and VMDQ are disabled - otherwise ensure
3937 * that hardware VLAN filters remain enabled. 3937 * that hardware VLAN filters remain enabled.
3938 */ 3938 */
3939 if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED | 3939 if (adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
3940 IXGBE_FLAG_SRIOV_ENABLED))) 3940 IXGBE_FLAG_SRIOV_ENABLED))
3941 vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); 3941 vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
3942 } else { 3942 } else {
3943 if (netdev->flags & IFF_ALLMULTI) { 3943 if (netdev->flags & IFF_ALLMULTI) {
@@ -7669,6 +7669,8 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
7669 return -EOPNOTSUPP; 7669 return -EOPNOTSUPP;
7670 7670
7671 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 7671 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
7672 if (!br_spec)
7673 return -EINVAL;
7672 7674
7673 nla_for_each_nested(attr, br_spec, rem) { 7675 nla_for_each_nested(attr, br_spec, rem) {
7674 __u16 mode; 7676 __u16 mode;
@@ -7677,6 +7679,9 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
7677 if (nla_type(attr) != IFLA_BRIDGE_MODE) 7679 if (nla_type(attr) != IFLA_BRIDGE_MODE)
7678 continue; 7680 continue;
7679 7681
7682 if (nla_len(attr) < sizeof(mode))
7683 return -EINVAL;
7684
7680 mode = nla_get_u16(attr); 7685 mode = nla_get_u16(attr);
7681 if (mode == BRIDGE_MODE_VEPA) { 7686 if (mode == BRIDGE_MODE_VEPA) {
7682 reg = 0; 7687 reg = 0;
@@ -7979,6 +7984,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
7979 int i, err, pci_using_dac, expected_gts; 7984 int i, err, pci_using_dac, expected_gts;
7980 unsigned int indices = MAX_TX_QUEUES; 7985 unsigned int indices = MAX_TX_QUEUES;
7981 u8 part_str[IXGBE_PBANUM_LENGTH]; 7986 u8 part_str[IXGBE_PBANUM_LENGTH];
7987 bool disable_dev = false;
7982#ifdef IXGBE_FCOE 7988#ifdef IXGBE_FCOE
7983 u16 device_caps; 7989 u16 device_caps;
7984#endif 7990#endif
@@ -8369,13 +8375,14 @@ err_sw_init:
8369 iounmap(adapter->io_addr); 8375 iounmap(adapter->io_addr);
8370 kfree(adapter->mac_table); 8376 kfree(adapter->mac_table);
8371err_ioremap: 8377err_ioremap:
8378 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
8372 free_netdev(netdev); 8379 free_netdev(netdev);
8373err_alloc_etherdev: 8380err_alloc_etherdev:
8374 pci_release_selected_regions(pdev, 8381 pci_release_selected_regions(pdev,
8375 pci_select_bars(pdev, IORESOURCE_MEM)); 8382 pci_select_bars(pdev, IORESOURCE_MEM));
8376err_pci_reg: 8383err_pci_reg:
8377err_dma: 8384err_dma:
8378 if (!adapter || !test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) 8385 if (!adapter || disable_dev)
8379 pci_disable_device(pdev); 8386 pci_disable_device(pdev);
8380 return err; 8387 return err;
8381} 8388}
@@ -8393,6 +8400,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
8393{ 8400{
8394 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); 8401 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
8395 struct net_device *netdev = adapter->netdev; 8402 struct net_device *netdev = adapter->netdev;
8403 bool disable_dev;
8396 8404
8397 ixgbe_dbg_adapter_exit(adapter); 8405 ixgbe_dbg_adapter_exit(adapter);
8398 8406
@@ -8442,11 +8450,12 @@ static void ixgbe_remove(struct pci_dev *pdev)
8442 e_dev_info("complete\n"); 8450 e_dev_info("complete\n");
8443 8451
8444 kfree(adapter->mac_table); 8452 kfree(adapter->mac_table);
8453 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
8445 free_netdev(netdev); 8454 free_netdev(netdev);
8446 8455
8447 pci_disable_pcie_error_reporting(pdev); 8456 pci_disable_pcie_error_reporting(pdev);
8448 8457
8449 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) 8458 if (disable_dev)
8450 pci_disable_device(pdev); 8459 pci_disable_device(pdev);
8451} 8460}
8452 8461
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 5d2498dcf536..cd5cf6d957c7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -1546,7 +1546,7 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1546 1546
1547 switch (op) { 1547 switch (op) {
1548 case RES_OP_RESERVE: 1548 case RES_OP_RESERVE:
1549 count = get_param_l(&in_param); 1549 count = get_param_l(&in_param) & 0xffffff;
1550 align = get_param_h(&in_param); 1550 align = get_param_h(&in_param);
1551 err = mlx4_grant_resource(dev, slave, RES_QP, count, 0); 1551 err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
1552 if (err) 1552 if (err)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index db56fa7ce8f9..5b0da3986216 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -177,12 +177,6 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
177 */ 177 */
178 plat->maxmtu = JUMBO_LEN; 178 plat->maxmtu = JUMBO_LEN;
179 179
180 /* Set default value for multicast hash bins */
181 plat->multicast_filter_bins = HASH_TABLE_SIZE;
182
183 /* Set default value for unicast filter entries */
184 plat->unicast_filter_entries = 1;
185
186 /* 180 /*
187 * Currently only the properties needed on SPEAr600 181 * Currently only the properties needed on SPEAr600
188 * are provided. All other properties should be added 182 * are provided. All other properties should be added
@@ -270,6 +264,13 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
270 return PTR_ERR(addr); 264 return PTR_ERR(addr);
271 265
272 plat_dat = dev_get_platdata(&pdev->dev); 266 plat_dat = dev_get_platdata(&pdev->dev);
267
268 /* Set default value for multicast hash bins */
269 plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
270
271 /* Set default value for unicast filter entries */
272 plat_dat->unicast_filter_entries = 1;
273
273 if (pdev->dev.of_node) { 274 if (pdev->dev.of_node) {
274 if (!plat_dat) 275 if (!plat_dat)
275 plat_dat = devm_kzalloc(&pdev->dev, 276 plat_dat = devm_kzalloc(&pdev->dev,
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index e1e335c339e3..be4649a49c5e 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2306,9 +2306,9 @@ static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
2306 if (ipv6) { 2306 if (ipv6) {
2307 udp_conf.family = AF_INET6; 2307 udp_conf.family = AF_INET6;
2308 udp_conf.use_udp6_tx_checksums = 2308 udp_conf.use_udp6_tx_checksums =
2309 !!(flags & VXLAN_F_UDP_ZERO_CSUM6_TX); 2309 !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
2310 udp_conf.use_udp6_rx_checksums = 2310 udp_conf.use_udp6_rx_checksums =
2311 !!(flags & VXLAN_F_UDP_ZERO_CSUM6_RX); 2311 !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
2312 } else { 2312 } else {
2313 udp_conf.family = AF_INET; 2313 udp_conf.family = AF_INET;
2314 udp_conf.local_ip.s_addr = INADDR_ANY; 2314 udp_conf.local_ip.s_addr = INADDR_ANY;
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index 4f6e66892acc..b894a84e8393 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -155,6 +155,7 @@ enum iwl_ucode_tlv_api {
155 * @IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT: supports Quiet Period requests 155 * @IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT: supports Quiet Period requests
156 * @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA), 156 * @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA),
157 * which also implies support for the scheduler configuration command 157 * which also implies support for the scheduler configuration command
158 * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
158 */ 159 */
159enum iwl_ucode_tlv_capa { 160enum iwl_ucode_tlv_capa {
160 IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0), 161 IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0),
@@ -163,6 +164,7 @@ enum iwl_ucode_tlv_capa {
163 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = BIT(10), 164 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = BIT(10),
164 IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = BIT(11), 165 IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = BIT(11),
165 IWL_UCODE_TLV_CAPA_DQA_SUPPORT = BIT(12), 166 IWL_UCODE_TLV_CAPA_DQA_SUPPORT = BIT(12),
167 IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = BIT(18),
166}; 168};
167 169
168/* The default calibrate table size if not specified by firmware file */ 170/* The default calibrate table size if not specified by firmware file */
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index b62405865b25..b6d2683da3a9 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -2448,9 +2448,15 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
2448 2448
2449 switch (vif->type) { 2449 switch (vif->type) {
2450 case NL80211_IFTYPE_STATION: 2450 case NL80211_IFTYPE_STATION:
2451 /* Use aux roc framework (HS20) */ 2451 if (mvm->fw->ucode_capa.capa[0] &
2452 ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, 2452 IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT) {
2453 vif, duration); 2453 /* Use aux roc framework (HS20) */
2454 ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
2455 vif, duration);
2456 goto out_unlock;
2457 }
2458 IWL_ERR(mvm, "hotspot not supported\n");
2459 ret = -EINVAL;
2454 goto out_unlock; 2460 goto out_unlock;
2455 case NL80211_IFTYPE_P2P_DEVICE: 2461 case NL80211_IFTYPE_P2P_DEVICE:
2456 /* handle below */ 2462 /* handle below */
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 61f5d36eca6a..846a2e6e34d8 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -2249,6 +2249,16 @@ int rtl_pci_probe(struct pci_dev *pdev,
2249 /*like read eeprom and so on */ 2249 /*like read eeprom and so on */
2250 rtlpriv->cfg->ops->read_eeprom_info(hw); 2250 rtlpriv->cfg->ops->read_eeprom_info(hw);
2251 2251
2252 if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
2253 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
2254 err = -ENODEV;
2255 goto fail3;
2256 }
2257 rtlpriv->cfg->ops->init_sw_leds(hw);
2258
2259 /*aspm */
2260 rtl_pci_init_aspm(hw);
2261
2252 /* Init mac80211 sw */ 2262 /* Init mac80211 sw */
2253 err = rtl_init_core(hw); 2263 err = rtl_init_core(hw);
2254 if (err) { 2264 if (err) {
@@ -2264,16 +2274,6 @@ int rtl_pci_probe(struct pci_dev *pdev,
2264 goto fail3; 2274 goto fail3;
2265 } 2275 }
2266 2276
2267 if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
2268 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
2269 err = -ENODEV;
2270 goto fail3;
2271 }
2272 rtlpriv->cfg->ops->init_sw_leds(hw);
2273
2274 /*aspm */
2275 rtl_pci_init_aspm(hw);
2276
2277 err = ieee80211_register_hw(hw); 2277 err = ieee80211_register_hw(hw);
2278 if (err) { 2278 if (err) {
2279 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 2279 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
index 310d3163dc5b..8ec8200002c7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
@@ -3672,8 +3672,9 @@ static void rtl8821ae_update_hal_rate_mask(struct ieee80211_hw *hw,
3672 mac->opmode == NL80211_IFTYPE_ADHOC) 3672 mac->opmode == NL80211_IFTYPE_ADHOC)
3673 macid = sta->aid + 1; 3673 macid = sta->aid + 1;
3674 if (wirelessmode == WIRELESS_MODE_N_5G || 3674 if (wirelessmode == WIRELESS_MODE_N_5G ||
3675 wirelessmode == WIRELESS_MODE_AC_5G) 3675 wirelessmode == WIRELESS_MODE_AC_5G ||
3676 ratr_bitmap = sta->supp_rates[NL80211_BAND_5GHZ]; 3676 wirelessmode == WIRELESS_MODE_A)
3677 ratr_bitmap = sta->supp_rates[NL80211_BAND_5GHZ] << 4;
3677 else 3678 else
3678 ratr_bitmap = sta->supp_rates[NL80211_BAND_2GHZ]; 3679 ratr_bitmap = sta->supp_rates[NL80211_BAND_2GHZ];
3679 3680
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 4e56a27f9689..fab0d4b42f58 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -39,7 +39,7 @@ struct backend_info {
39static int connect_rings(struct backend_info *be, struct xenvif_queue *queue); 39static int connect_rings(struct backend_info *be, struct xenvif_queue *queue);
40static void connect(struct backend_info *be); 40static void connect(struct backend_info *be);
41static int read_xenbus_vif_flags(struct backend_info *be); 41static int read_xenbus_vif_flags(struct backend_info *be);
42static void backend_create_xenvif(struct backend_info *be); 42static int backend_create_xenvif(struct backend_info *be);
43static void unregister_hotplug_status_watch(struct backend_info *be); 43static void unregister_hotplug_status_watch(struct backend_info *be);
44static void set_backend_state(struct backend_info *be, 44static void set_backend_state(struct backend_info *be,
45 enum xenbus_state state); 45 enum xenbus_state state);
@@ -352,7 +352,9 @@ static int netback_probe(struct xenbus_device *dev,
352 be->state = XenbusStateInitWait; 352 be->state = XenbusStateInitWait;
353 353
354 /* This kicks hotplug scripts, so do it immediately. */ 354 /* This kicks hotplug scripts, so do it immediately. */
355 backend_create_xenvif(be); 355 err = backend_create_xenvif(be);
356 if (err)
357 goto fail;
356 358
357 return 0; 359 return 0;
358 360
@@ -397,19 +399,19 @@ static int netback_uevent(struct xenbus_device *xdev,
397} 399}
398 400
399 401
400static void backend_create_xenvif(struct backend_info *be) 402static int backend_create_xenvif(struct backend_info *be)
401{ 403{
402 int err; 404 int err;
403 long handle; 405 long handle;
404 struct xenbus_device *dev = be->dev; 406 struct xenbus_device *dev = be->dev;
405 407
406 if (be->vif != NULL) 408 if (be->vif != NULL)
407 return; 409 return 0;
408 410
409 err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle); 411 err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle);
410 if (err != 1) { 412 if (err != 1) {
411 xenbus_dev_fatal(dev, err, "reading handle"); 413 xenbus_dev_fatal(dev, err, "reading handle");
412 return; 414 return (err < 0) ? err : -EINVAL;
413 } 415 }
414 416
415 be->vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle); 417 be->vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle);
@@ -417,10 +419,11 @@ static void backend_create_xenvif(struct backend_info *be)
417 err = PTR_ERR(be->vif); 419 err = PTR_ERR(be->vif);
418 be->vif = NULL; 420 be->vif = NULL;
419 xenbus_dev_fatal(dev, err, "creating interface"); 421 xenbus_dev_fatal(dev, err, "creating interface");
420 return; 422 return err;
421 } 423 }
422 424
423 kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE); 425 kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
426 return 0;
424} 427}
425 428
426static void backend_disconnect(struct backend_info *be) 429static void backend_disconnect(struct backend_info *be)
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 9fab30af0e75..084587d7cd13 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -590,6 +590,20 @@ static struct msi_desc *msi_setup_entry(struct pci_dev *dev)
590 return entry; 590 return entry;
591} 591}
592 592
593static int msi_verify_entries(struct pci_dev *dev)
594{
595 struct msi_desc *entry;
596
597 list_for_each_entry(entry, &dev->msi_list, list) {
598 if (!dev->no_64bit_msi || !entry->msg.address_hi)
599 continue;
600 dev_err(&dev->dev, "Device has broken 64-bit MSI but arch"
601 " tried to assign one above 4G\n");
602 return -EIO;
603 }
604 return 0;
605}
606
593/** 607/**
594 * msi_capability_init - configure device's MSI capability structure 608 * msi_capability_init - configure device's MSI capability structure
595 * @dev: pointer to the pci_dev data structure of MSI device function 609 * @dev: pointer to the pci_dev data structure of MSI device function
@@ -627,6 +641,13 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
627 return ret; 641 return ret;
628 } 642 }
629 643
644 ret = msi_verify_entries(dev);
645 if (ret) {
646 msi_mask_irq(entry, mask, ~mask);
647 free_msi_irqs(dev);
648 return ret;
649 }
650
630 ret = populate_msi_sysfs(dev); 651 ret = populate_msi_sysfs(dev);
631 if (ret) { 652 if (ret) {
632 msi_mask_irq(entry, mask, ~mask); 653 msi_mask_irq(entry, mask, ~mask);
@@ -739,6 +760,11 @@ static int msix_capability_init(struct pci_dev *dev,
739 if (ret) 760 if (ret)
740 goto out_avail; 761 goto out_avail;
741 762
763 /* Check if all MSI entries honor device restrictions */
764 ret = msi_verify_entries(dev);
765 if (ret)
766 goto out_free;
767
742 /* 768 /*
743 * Some devices require MSI-X to be enabled before we can touch the 769 * Some devices require MSI-X to be enabled before we can touch the
744 * MSI-X registers. We need to mask all the vectors to prevent 770 * MSI-X registers. We need to mask all the vectors to prevent
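msi_verify_entries() is the core-side half of the no_64bit_msi handling: it rejects any MSI/MSI-X message whose address_hi is non-zero once a device has been flagged as unable to decode full 64-bit MSI addresses. Driver or platform code sets the flag before enabling MSI, exactly as the radeon_irq_kms.c hunk earlier in this diff does for pre-Bonaire GPUs. A hedged driver-side sketch (the helper and its condition are illustrative, not from this patch):

/* Hedged sketch: foo_enable_msi() is hypothetical; radeon above is the
 * real in-tree user added by this series. */
static int foo_enable_msi(struct pci_dev *pdev, bool msi_limited_to_40bit)
{
	if (msi_limited_to_40bit)
		pdev->no_64bit_msi = 1;	/* core now refuses MSI addresses above 4 GiB */

	return pci_enable_msi(pdev);
}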
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 79e5c94107a9..72533c58c1f3 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -412,6 +412,7 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
412 struct fc_frame_header *fh; 412 struct fc_frame_header *fh;
413 struct fcoe_rcv_info *fr; 413 struct fcoe_rcv_info *fr;
414 struct fcoe_percpu_s *bg; 414 struct fcoe_percpu_s *bg;
415 struct sk_buff *tmp_skb;
415 unsigned short oxid; 416 unsigned short oxid;
416 417
417 interface = container_of(ptype, struct bnx2fc_interface, 418 interface = container_of(ptype, struct bnx2fc_interface,
@@ -424,6 +425,12 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
424 goto err; 425 goto err;
425 } 426 }
426 427
428 tmp_skb = skb_share_check(skb, GFP_ATOMIC);
429 if (!tmp_skb)
430 goto err;
431
432 skb = tmp_skb;
433
427 if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) { 434 if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
428 printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n"); 435 printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n");
429 goto err; 436 goto err;
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 49014a143c6a..c1d04d4d3c6c 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -202,6 +202,7 @@ static struct {
202 {"IOMEGA", "Io20S *F", NULL, BLIST_KEY}, 202 {"IOMEGA", "Io20S *F", NULL, BLIST_KEY},
203 {"INSITE", "Floptical F*8I", NULL, BLIST_KEY}, 203 {"INSITE", "Floptical F*8I", NULL, BLIST_KEY},
204 {"INSITE", "I325VM", NULL, BLIST_KEY}, 204 {"INSITE", "I325VM", NULL, BLIST_KEY},
205 {"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC},
205 {"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, 206 {"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
206 {"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN}, 207 {"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN},
207 {"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, 208 {"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index 8adf067ff019..1c3467b82566 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -102,7 +102,6 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
102 clkfreq = devm_kzalloc(dev, sz * sizeof(*clkfreq), 102 clkfreq = devm_kzalloc(dev, sz * sizeof(*clkfreq),
103 GFP_KERNEL); 103 GFP_KERNEL);
104 if (!clkfreq) { 104 if (!clkfreq) {
105 dev_err(dev, "%s: no memory\n", "freq-table-hz");
106 ret = -ENOMEM; 105 ret = -ENOMEM;
107 goto out; 106 goto out;
108 } 107 }
@@ -112,19 +111,19 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
112 if (ret && (ret != -EINVAL)) { 111 if (ret && (ret != -EINVAL)) {
113 dev_err(dev, "%s: error reading array %d\n", 112 dev_err(dev, "%s: error reading array %d\n",
114 "freq-table-hz", ret); 113 "freq-table-hz", ret);
115 goto free_clkfreq; 114 return ret;
116 } 115 }
117 116
118 for (i = 0; i < sz; i += 2) { 117 for (i = 0; i < sz; i += 2) {
119 ret = of_property_read_string_index(np, 118 ret = of_property_read_string_index(np,
120 "clock-names", i/2, (const char **)&name); 119 "clock-names", i/2, (const char **)&name);
121 if (ret) 120 if (ret)
122 goto free_clkfreq; 121 goto out;
123 122
124 clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL); 123 clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL);
125 if (!clki) { 124 if (!clki) {
126 ret = -ENOMEM; 125 ret = -ENOMEM;
127 goto free_clkfreq; 126 goto out;
128 } 127 }
129 128
130 clki->min_freq = clkfreq[i]; 129 clki->min_freq = clkfreq[i];
@@ -134,8 +133,6 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
134 clki->min_freq, clki->max_freq, clki->name); 133 clki->min_freq, clki->max_freq, clki->name);
135 list_add_tail(&clki->list, &hba->clk_list_head); 134 list_add_tail(&clki->list, &hba->clk_list_head);
136 } 135 }
137free_clkfreq:
138 kfree(clkfreq);
139out: 136out:
140 return ret; 137 return ret;
141} 138}
@@ -162,10 +159,8 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
162 } 159 }
163 160
164 vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL); 161 vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
165 if (!vreg) { 162 if (!vreg)
166 dev_err(dev, "No memory for %s regulator\n", name); 163 return -ENOMEM;
167 goto out;
168 }
169 164
170 vreg->name = kstrdup(name, GFP_KERNEL); 165 vreg->name = kstrdup(name, GFP_KERNEL);
171 166
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 497c38a4a866..605ca60e8a10 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -744,6 +744,8 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
744 if (!ufshcd_is_clkgating_allowed(hba)) 744 if (!ufshcd_is_clkgating_allowed(hba))
745 return; 745 return;
746 device_remove_file(hba->dev, &hba->clk_gating.delay_attr); 746 device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
747 cancel_work_sync(&hba->clk_gating.ungate_work);
748 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
747} 749}
748 750
749/* Must be called with host lock acquired */ 751/* Must be called with host lock acquired */
@@ -2246,6 +2248,22 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
2246 return ret; 2248 return ret;
2247} 2249}
2248 2250
2251 /**
2252 * ufshcd_init_pwr_info - setting the POR (power on reset)
2253 * values in hba power info
2254 * @hba: per-adapter instance
2255 */
2256static void ufshcd_init_pwr_info(struct ufs_hba *hba)
2257{
2258 hba->pwr_info.gear_rx = UFS_PWM_G1;
2259 hba->pwr_info.gear_tx = UFS_PWM_G1;
2260 hba->pwr_info.lane_rx = 1;
2261 hba->pwr_info.lane_tx = 1;
2262 hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
2263 hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
2264 hba->pwr_info.hs_rate = 0;
2265}
2266
2249/** 2267/**
2250 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device 2268 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
2251 * @hba: per-adapter instance 2269 * @hba: per-adapter instance
@@ -2844,8 +2862,13 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev)
2844 hba = shost_priv(sdev->host); 2862 hba = shost_priv(sdev->host);
2845 scsi_deactivate_tcq(sdev, hba->nutrs); 2863 scsi_deactivate_tcq(sdev, hba->nutrs);
2846 /* Drop the reference as it won't be needed anymore */ 2864 /* Drop the reference as it won't be needed anymore */
2847 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) 2865 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
2866 unsigned long flags;
2867
2868 spin_lock_irqsave(hba->host->host_lock, flags);
2848 hba->sdev_ufs_device = NULL; 2869 hba->sdev_ufs_device = NULL;
2870 spin_unlock_irqrestore(hba->host->host_lock, flags);
2871 }
2849} 2872}
2850 2873
2851/** 2874/**
@@ -4062,6 +4085,8 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba)
4062static int ufshcd_scsi_add_wlus(struct ufs_hba *hba) 4085static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
4063{ 4086{
4064 int ret = 0; 4087 int ret = 0;
4088 struct scsi_device *sdev_rpmb;
4089 struct scsi_device *sdev_boot;
4065 4090
4066 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0, 4091 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
4067 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL); 4092 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
@@ -4070,26 +4095,27 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
4070 hba->sdev_ufs_device = NULL; 4095 hba->sdev_ufs_device = NULL;
4071 goto out; 4096 goto out;
4072 } 4097 }
4098 scsi_device_put(hba->sdev_ufs_device);
4073 4099
4074 hba->sdev_boot = __scsi_add_device(hba->host, 0, 0, 4100 sdev_boot = __scsi_add_device(hba->host, 0, 0,
4075 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL); 4101 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
4076 if (IS_ERR(hba->sdev_boot)) { 4102 if (IS_ERR(sdev_boot)) {
4077 ret = PTR_ERR(hba->sdev_boot); 4103 ret = PTR_ERR(sdev_boot);
4078 hba->sdev_boot = NULL;
4079 goto remove_sdev_ufs_device; 4104 goto remove_sdev_ufs_device;
4080 } 4105 }
4106 scsi_device_put(sdev_boot);
4081 4107
4082 hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0, 4108 sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
4083 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL); 4109 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
4084 if (IS_ERR(hba->sdev_rpmb)) { 4110 if (IS_ERR(sdev_rpmb)) {
4085 ret = PTR_ERR(hba->sdev_rpmb); 4111 ret = PTR_ERR(sdev_rpmb);
4086 hba->sdev_rpmb = NULL;
4087 goto remove_sdev_boot; 4112 goto remove_sdev_boot;
4088 } 4113 }
4114 scsi_device_put(sdev_rpmb);
4089 goto out; 4115 goto out;
4090 4116
4091remove_sdev_boot: 4117remove_sdev_boot:
4092 scsi_remove_device(hba->sdev_boot); 4118 scsi_remove_device(sdev_boot);
4093remove_sdev_ufs_device: 4119remove_sdev_ufs_device:
4094 scsi_remove_device(hba->sdev_ufs_device); 4120 scsi_remove_device(hba->sdev_ufs_device);
4095out: 4121out:
@@ -4097,30 +4123,6 @@ out:
4097} 4123}
4098 4124
4099/** 4125/**
4100 * ufshcd_scsi_remove_wlus - Removes the W-LUs which were added by
4101 * ufshcd_scsi_add_wlus()
4102 * @hba: per-adapter instance
4103 *
4104 */
4105static void ufshcd_scsi_remove_wlus(struct ufs_hba *hba)
4106{
4107 if (hba->sdev_ufs_device) {
4108 scsi_remove_device(hba->sdev_ufs_device);
4109 hba->sdev_ufs_device = NULL;
4110 }
4111
4112 if (hba->sdev_boot) {
4113 scsi_remove_device(hba->sdev_boot);
4114 hba->sdev_boot = NULL;
4115 }
4116
4117 if (hba->sdev_rpmb) {
4118 scsi_remove_device(hba->sdev_rpmb);
4119 hba->sdev_rpmb = NULL;
4120 }
4121}
4122
4123/**
4124 * ufshcd_probe_hba - probe hba to detect device and initialize 4126 * ufshcd_probe_hba - probe hba to detect device and initialize
4125 * @hba: per-adapter instance 4127 * @hba: per-adapter instance
4126 * 4128 *
@@ -4134,6 +4136,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
4134 if (ret) 4136 if (ret)
4135 goto out; 4137 goto out;
4136 4138
4139 ufshcd_init_pwr_info(hba);
4140
4137 /* UniPro link is active now */ 4141 /* UniPro link is active now */
4138 ufshcd_set_link_active(hba); 4142 ufshcd_set_link_active(hba);
4139 4143
@@ -4264,12 +4268,18 @@ static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
4264static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba, 4268static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
4265 struct ufs_vreg *vreg) 4269 struct ufs_vreg *vreg)
4266{ 4270{
4271 if (!vreg)
4272 return 0;
4273
4267 return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA); 4274 return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
4268} 4275}
4269 4276
4270static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, 4277static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
4271 struct ufs_vreg *vreg) 4278 struct ufs_vreg *vreg)
4272{ 4279{
4280 if (!vreg)
4281 return 0;
4282
4273 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); 4283 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
4274} 4284}
4275 4285
@@ -4471,7 +4481,7 @@ out:
4471 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled) 4481 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
4472 clk_disable_unprepare(clki->clk); 4482 clk_disable_unprepare(clki->clk);
4473 } 4483 }
4474 } else if (!ret && on) { 4484 } else if (on) {
4475 spin_lock_irqsave(hba->host->host_lock, flags); 4485 spin_lock_irqsave(hba->host->host_lock, flags);
4476 hba->clk_gating.state = CLKS_ON; 4486 hba->clk_gating.state = CLKS_ON;
4477 spin_unlock_irqrestore(hba->host->host_lock, flags); 4487 spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -4675,11 +4685,25 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
4675{ 4685{
4676 unsigned char cmd[6] = { START_STOP }; 4686 unsigned char cmd[6] = { START_STOP };
4677 struct scsi_sense_hdr sshdr; 4687 struct scsi_sense_hdr sshdr;
4678 struct scsi_device *sdp = hba->sdev_ufs_device; 4688 struct scsi_device *sdp;
4689 unsigned long flags;
4679 int ret; 4690 int ret;
4680 4691
4681 if (!sdp || !scsi_device_online(sdp)) 4692 spin_lock_irqsave(hba->host->host_lock, flags);
4682 return -ENODEV; 4693 sdp = hba->sdev_ufs_device;
4694 if (sdp) {
4695 ret = scsi_device_get(sdp);
4696 if (!ret && !scsi_device_online(sdp)) {
4697 ret = -ENODEV;
4698 scsi_device_put(sdp);
4699 }
4700 } else {
4701 ret = -ENODEV;
4702 }
4703 spin_unlock_irqrestore(hba->host->host_lock, flags);
4704
4705 if (ret)
4706 return ret;
4683 4707
4684 /* 4708 /*
4685 * If scsi commands fail, the scsi mid-layer schedules scsi error- 4709 * If scsi commands fail, the scsi mid-layer schedules scsi error-
@@ -4718,6 +4742,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
4718 if (!ret) 4742 if (!ret)
4719 hba->curr_dev_pwr_mode = pwr_mode; 4743 hba->curr_dev_pwr_mode = pwr_mode;
4720out: 4744out:
4745 scsi_device_put(sdp);
4721 hba->host->eh_noresume = 0; 4746 hba->host->eh_noresume = 0;
4722 return ret; 4747 return ret;
4723} 4748}
@@ -5087,7 +5112,7 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
5087 int ret = 0; 5112 int ret = 0;
5088 5113
5089 if (!hba || !hba->is_powered) 5114 if (!hba || !hba->is_powered)
5090 goto out; 5115 return 0;
5091 5116
5092 if (pm_runtime_suspended(hba->dev)) { 5117 if (pm_runtime_suspended(hba->dev)) {
5093 if (hba->rpm_lvl == hba->spm_lvl) 5118 if (hba->rpm_lvl == hba->spm_lvl)
@@ -5231,7 +5256,6 @@ EXPORT_SYMBOL(ufshcd_shutdown);
5231void ufshcd_remove(struct ufs_hba *hba) 5256void ufshcd_remove(struct ufs_hba *hba)
5232{ 5257{
5233 scsi_remove_host(hba->host); 5258 scsi_remove_host(hba->host);
5234 ufshcd_scsi_remove_wlus(hba);
5235 /* disable interrupts */ 5259 /* disable interrupts */
5236 ufshcd_disable_intr(hba, hba->intr_mask); 5260 ufshcd_disable_intr(hba, hba->intr_mask);
5237 ufshcd_hba_stop(hba); 5261 ufshcd_hba_stop(hba);
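
Several ufshcd.c hunks tighten reference handling around the device well-known LUN: ufshcd_set_dev_pwr_mode() now looks up hba->sdev_ufs_device under the host lock, pins it with scsi_device_get() and releases it with scsi_device_put() when the command is done, while slave_destroy clears the pointer under the same lock. A compact user-space model of that look-up / get / use / put pattern, with a pthread mutex and an integer refcount standing in for the host lock and the SCSI device refcount:

#include <pthread.h>
#include <stdio.h>

/* Toy device with a refcount; stands in for struct scsi_device. */
struct dev {
	int refs;
	int online;
};

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;
static struct dev the_dev = { .refs = 1, .online = 1 };
static struct dev *registered = &the_dev;	/* like hba->sdev_ufs_device */

static int use_device(void)
{
	struct dev *d;
	int ret = 0;

	/* Look the device up and take a reference while holding the lock,
	 * so a concurrent "slave_destroy" cannot free it underneath us. */
	pthread_mutex_lock(&host_lock);
	d = registered;
	if (d) {
		d->refs++;			/* scsi_device_get() analogue */
		if (!d->online) {
			d->refs--;		/* drop the reference on the error path */
			ret = -1;		/* -ENODEV analogue */
		}
	} else {
		ret = -1;
	}
	pthread_mutex_unlock(&host_lock);

	if (ret)
		return ret;

	printf("issuing command, refs=%d\n", d->refs);

	/* Drop the reference once the command has finished. */
	pthread_mutex_lock(&host_lock);
	d->refs--;				/* scsi_device_put() analogue */
	pthread_mutex_unlock(&host_lock);
	return 0;
}

int main(void)
{
	return use_device() ? 1 : 0;
}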
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 58ecdff5065c..4a574aa45855 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -392,8 +392,6 @@ struct ufs_hba {
392 * "UFS device" W-LU. 392 * "UFS device" W-LU.
393 */ 393 */
394 struct scsi_device *sdev_ufs_device; 394 struct scsi_device *sdev_ufs_device;
395 struct scsi_device *sdev_rpmb;
396 struct scsi_device *sdev_boot;
397 395
398 enum ufs_dev_pwr_mode curr_dev_pwr_mode; 396 enum ufs_dev_pwr_mode curr_dev_pwr_mode;
399 enum uic_link_state uic_link_state; 397 enum uic_link_state uic_link_state;
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 72e12bad14b9..d0d5542efc06 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -376,9 +376,6 @@ static void pump_transfers(unsigned long data)
376 chip = dws->cur_chip; 376 chip = dws->cur_chip;
377 spi = message->spi; 377 spi = message->spi;
378 378
379 if (unlikely(!chip->clk_div))
380 chip->clk_div = dws->max_freq / chip->speed_hz;
381
382 if (message->state == ERROR_STATE) { 379 if (message->state == ERROR_STATE) {
383 message->status = -EIO; 380 message->status = -EIO;
384 goto early_exit; 381 goto early_exit;
@@ -419,7 +416,7 @@ static void pump_transfers(unsigned long data)
419 if (transfer->speed_hz) { 416 if (transfer->speed_hz) {
420 speed = chip->speed_hz; 417 speed = chip->speed_hz;
421 418
422 if (transfer->speed_hz != speed) { 419 if ((transfer->speed_hz != speed) || (!chip->clk_div)) {
423 speed = transfer->speed_hz; 420 speed = transfer->speed_hz;
424 421
425 /* clk_div doesn't support odd number */ 422 /* clk_div doesn't support odd number */
@@ -581,7 +578,6 @@ static int dw_spi_setup(struct spi_device *spi)
581 dev_err(&spi->dev, "No max speed HZ parameter\n"); 578 dev_err(&spi->dev, "No max speed HZ parameter\n");
582 return -EINVAL; 579 return -EINVAL;
583 } 580 }
584 chip->speed_hz = spi->max_speed_hz;
585 581
586 chip->tmode = 0; /* Tx & Rx */ 582 chip->tmode = 0; /* Tx & Rx */
587 /* Default SPI mode is SCPOL = 0, SCPH = 0 */ 583 /* Default SPI mode is SCPOL = 0, SCPH = 0 */
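
The spi-dw change stops seeding chip->speed_hz from max_speed_hz and instead recomputes the clock divider inside pump_transfers() whenever the transfer speed differs from the cached one or no divider has been programmed yet; the retained in-tree comment notes the divider must be even. A stand-alone sketch of that recompute-only-when-needed logic; the field names and the particular even-rounding expression are illustrative, not lifted from the driver.

#include <stdio.h>

/* Toy cached state; field names are illustrative, not the driver's. */
struct chip_state {
	unsigned int speed_hz;	/* speed the current divider was computed for */
	unsigned int clk_div;	/* 0 means "not programmed yet" */
};

static void program_transfer(struct chip_state *chip,
			     unsigned int max_freq, unsigned int transfer_hz)
{
	if (transfer_hz != chip->speed_hz || !chip->clk_div) {
		/* clk_div doesn't support odd numbers: round up to even. */
		chip->clk_div = (max_freq / transfer_hz + 1) & 0xfffe;
		chip->speed_hz = transfer_hz;
		printf("reprogrammed: div=%u for %u Hz\n",
		       chip->clk_div, transfer_hz);
	} else {
		printf("divider %u reused\n", chip->clk_div);
	}
}

int main(void)
{
	struct chip_state chip = { 0, 0 };

	program_transfer(&chip, 100000000, 10000000);	/* first use: must program */
	program_transfer(&chip, 100000000, 10000000);	/* same speed: reuse */
	program_transfer(&chip, 100000000, 4000000);	/* new speed: recompute */
	return 0;
}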
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index 39e2c0a55a28..f63de781c729 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -562,9 +562,9 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
562 562
563 sspi->word_width = DIV_ROUND_UP(bits_per_word, 8); 563 sspi->word_width = DIV_ROUND_UP(bits_per_word, 8);
564 txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) | 564 txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
565 sspi->word_width; 565 (sspi->word_width >> 1);
566 rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) | 566 rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
567 sspi->word_width; 567 (sspi->word_width >> 1);
568 568
569 if (!(spi->mode & SPI_CS_HIGH)) 569 if (!(spi->mode & SPI_CS_HIGH))
570 regval |= SIRFSOC_SPI_CS_IDLE_STAT; 570 regval |= SIRFSOC_SPI_CS_IDLE_STAT;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index ebcb33df2eb2..50f20f243981 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -615,13 +615,13 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
615 sg_free_table(sgt); 615 sg_free_table(sgt);
616 return -ENOMEM; 616 return -ENOMEM;
617 } 617 }
618 sg_buf = page_address(vm_page) + 618 sg_set_page(&sgt->sgl[i], vm_page,
619 ((size_t)buf & ~PAGE_MASK); 619 min, offset_in_page(buf));
620 } else { 620 } else {
621 sg_buf = buf; 621 sg_buf = buf;
622 sg_set_buf(&sgt->sgl[i], sg_buf, min);
622 } 623 }
623 624
624 sg_set_buf(&sgt->sgl[i], sg_buf, min);
625 625
626 buf += min; 626 buf += min;
627 len -= min; 627 len -= min;
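
For vmalloc-backed buffers the spi_map_buf() hunk now fills the scatterlist entry with sg_set_page() plus offset_in_page() instead of computing a virtual address by hand and calling sg_set_buf(). The sketch below reproduces only the page-offset arithmetic that feeds sg_set_page(), assuming a 4 KiB page size; PAGE_SIZE and offset_in_page() here are local definitions, not the kernel's.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define PAGE_MASK (~(uintptr_t)(PAGE_SIZE - 1))

/* Same arithmetic as the kernel's offset_in_page() helper. */
static unsigned int offset_in_page(const void *p)
{
	return (unsigned int)((uintptr_t)p & ~PAGE_MASK);
}

int main(void)
{
	/* Example address standing in for a vmalloc() return value. */
	const void *buf = (const void *)(uintptr_t)0x5123u;
	unsigned int off = offset_in_page(buf);
	unsigned int chunk = PAGE_SIZE - off;	/* bytes that fit in this page */

	printf("page offset 0x%x, first chunk %u bytes\n", off, chunk);
	return 0;
}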
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index 9935e66935af..eddef9cd2e16 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -275,11 +275,11 @@ u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
275 if (check_fwstate(pmlmepriv, _FW_LINKED) == true) 275 if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
276 rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SCAN, 1); 276 rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SCAN, 1);
277 277
278 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 278 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
279 if (ph2c == NULL) 279 if (ph2c == NULL)
280 return _FAIL; 280 return _FAIL;
281 281
282 psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_KERNEL); 282 psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_ATOMIC);
283 if (psurveyPara == NULL) { 283 if (psurveyPara == NULL) {
284 kfree(ph2c); 284 kfree(ph2c);
285 return _FAIL; 285 return _FAIL;
@@ -405,7 +405,7 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
405 else 405 else
406 RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+Join cmd: SSid =[%s]\n", pmlmepriv->assoc_ssid.Ssid)); 406 RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+Join cmd: SSid =[%s]\n", pmlmepriv->assoc_ssid.Ssid));
407 407
408 pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 408 pcmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
409 if (pcmd == NULL) { 409 if (pcmd == NULL) {
410 res = _FAIL; 410 res = _FAIL;
411 RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("rtw_joinbss_cmd: memory allocate for cmd_obj fail!!!\n")); 411 RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("rtw_joinbss_cmd: memory allocate for cmd_obj fail!!!\n"));
@@ -755,13 +755,13 @@ u8 rtw_dynamic_chk_wk_cmd(struct adapter *padapter)
755 u8 res = _SUCCESS; 755 u8 res = _SUCCESS;
756 756
757 757
758 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 758 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
759 if (ph2c == NULL) { 759 if (ph2c == NULL) {
760 res = _FAIL; 760 res = _FAIL;
761 goto exit; 761 goto exit;
762 } 762 }
763 763
764 pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL); 764 pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
765 if (pdrvextra_cmd_parm == NULL) { 765 if (pdrvextra_cmd_parm == NULL) {
766 kfree(ph2c); 766 kfree(ph2c);
767 res = _FAIL; 767 res = _FAIL;
@@ -967,13 +967,13 @@ u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue)
967 u8 res = _SUCCESS; 967 u8 res = _SUCCESS;
968 968
969 if (enqueue) { 969 if (enqueue) {
970 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 970 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
971 if (ph2c == NULL) { 971 if (ph2c == NULL) {
972 res = _FAIL; 972 res = _FAIL;
973 goto exit; 973 goto exit;
974 } 974 }
975 975
976 pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL); 976 pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
977 if (pdrvextra_cmd_parm == NULL) { 977 if (pdrvextra_cmd_parm == NULL) {
978 kfree(ph2c); 978 kfree(ph2c);
979 res = _FAIL; 979 res = _FAIL;
@@ -1010,13 +1010,13 @@ u8 rtw_rpt_timer_cfg_cmd(struct adapter *padapter, u16 min_time)
1010 1010
1011 u8 res = _SUCCESS; 1011 u8 res = _SUCCESS;
1012 1012
1013 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 1013 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
1014 if (ph2c == NULL) { 1014 if (ph2c == NULL) {
1015 res = _FAIL; 1015 res = _FAIL;
1016 goto exit; 1016 goto exit;
1017 } 1017 }
1018 1018
1019 pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL); 1019 pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
1020 if (pdrvextra_cmd_parm == NULL) { 1020 if (pdrvextra_cmd_parm == NULL) {
1021 kfree(ph2c); 1021 kfree(ph2c);
1022 res = _FAIL; 1022 res = _FAIL;
@@ -1088,13 +1088,13 @@ u8 rtw_ps_cmd(struct adapter *padapter)
1088 1088
1089 u8 res = _SUCCESS; 1089 u8 res = _SUCCESS;
1090 1090
1091 ppscmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 1091 ppscmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
1092 if (ppscmd == NULL) { 1092 if (ppscmd == NULL) {
1093 res = _FAIL; 1093 res = _FAIL;
1094 goto exit; 1094 goto exit;
1095 } 1095 }
1096 1096
1097 pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL); 1097 pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
1098 if (pdrvextra_cmd_parm == NULL) { 1098 if (pdrvextra_cmd_parm == NULL) {
1099 kfree(ppscmd); 1099 kfree(ppscmd);
1100 res = _FAIL; 1100 res = _FAIL;
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index 5ba5099ec20d..70b1bc3e0e63 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -4241,12 +4241,12 @@ void report_survey_event(struct adapter *padapter,
4241 pcmdpriv = &padapter->cmdpriv; 4241 pcmdpriv = &padapter->cmdpriv;
4242 4242
4243 4243
4244 pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 4244 pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
4245 if (pcmd_obj == NULL) 4245 if (pcmd_obj == NULL)
4246 return; 4246 return;
4247 4247
4248 cmdsz = (sizeof(struct survey_event) + sizeof(struct C2HEvent_Header)); 4248 cmdsz = (sizeof(struct survey_event) + sizeof(struct C2HEvent_Header));
4249 pevtcmd = kzalloc(cmdsz, GFP_KERNEL); 4249 pevtcmd = kzalloc(cmdsz, GFP_ATOMIC);
4250 if (pevtcmd == NULL) { 4250 if (pevtcmd == NULL) {
4251 kfree(pcmd_obj); 4251 kfree(pcmd_obj);
4252 return; 4252 return;
@@ -4339,12 +4339,12 @@ void report_join_res(struct adapter *padapter, int res)
4339 struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info); 4339 struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
4340 struct cmd_priv *pcmdpriv = &padapter->cmdpriv; 4340 struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
4341 4341
4342 pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 4342 pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
4343 if (pcmd_obj == NULL) 4343 if (pcmd_obj == NULL)
4344 return; 4344 return;
4345 4345
4346 cmdsz = (sizeof(struct joinbss_event) + sizeof(struct C2HEvent_Header)); 4346 cmdsz = (sizeof(struct joinbss_event) + sizeof(struct C2HEvent_Header));
4347 pevtcmd = kzalloc(cmdsz, GFP_KERNEL); 4347 pevtcmd = kzalloc(cmdsz, GFP_ATOMIC);
4348 if (pevtcmd == NULL) { 4348 if (pevtcmd == NULL) {
4349 kfree(pcmd_obj); 4349 kfree(pcmd_obj);
4350 return; 4350 return;
@@ -4854,11 +4854,11 @@ void survey_timer_hdl(void *function_context)
4854 pmlmeext->scan_abort = false;/* reset */ 4854 pmlmeext->scan_abort = false;/* reset */
4855 } 4855 }
4856 4856
4857 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 4857 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
4858 if (ph2c == NULL) 4858 if (ph2c == NULL)
4859 goto exit_survey_timer_hdl; 4859 goto exit_survey_timer_hdl;
4860 4860
4861 psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_KERNEL); 4861 psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_ATOMIC);
4862 if (psurveyPara == NULL) { 4862 if (psurveyPara == NULL) {
4863 kfree(ph2c); 4863 kfree(ph2c);
4864 goto exit_survey_timer_hdl; 4864 goto exit_survey_timer_hdl;
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index 33ccbbbd8ed6..d300369977fa 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -935,7 +935,7 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
935 return true; 935 return true;
936 } 936 }
937 937
938 bssid = kzalloc(sizeof(struct wlan_bssid_ex), GFP_KERNEL); 938 bssid = kzalloc(sizeof(struct wlan_bssid_ex), GFP_ATOMIC);
939 939
940 subtype = GetFrameSubType(pframe) >> 4; 940 subtype = GetFrameSubType(pframe) >> 4;
941 941
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 407a318b09db..2f87150a21b7 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -47,6 +47,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
47 {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */ 47 {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */
48 {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */ 48 {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
49 {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */ 49 {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
50 {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
50 {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ 51 {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
51 {} /* Terminating entry */ 52 {} /* Terminating entry */
52}; 53};
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 1ab0018271c5..ad09e51ffae4 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -50,15 +50,14 @@ struct cpufreq_cooling_device {
50 unsigned int cpufreq_state; 50 unsigned int cpufreq_state;
51 unsigned int cpufreq_val; 51 unsigned int cpufreq_val;
52 struct cpumask allowed_cpus; 52 struct cpumask allowed_cpus;
53 struct list_head node;
53}; 54};
54static DEFINE_IDR(cpufreq_idr); 55static DEFINE_IDR(cpufreq_idr);
55static DEFINE_MUTEX(cooling_cpufreq_lock); 56static DEFINE_MUTEX(cooling_cpufreq_lock);
56 57
57static unsigned int cpufreq_dev_count; 58static unsigned int cpufreq_dev_count;
58 59
59/* notify_table passes value to the CPUFREQ_ADJUST callback function. */ 60static LIST_HEAD(cpufreq_dev_list);
60#define NOTIFY_INVALID NULL
61static struct cpufreq_cooling_device *notify_device;
62 61
63/** 62/**
64 * get_idr - function to get a unique id. 63 * get_idr - function to get a unique id.
@@ -287,15 +286,12 @@ static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device,
287 286
288 cpufreq_device->cpufreq_state = cooling_state; 287 cpufreq_device->cpufreq_state = cooling_state;
289 cpufreq_device->cpufreq_val = clip_freq; 288 cpufreq_device->cpufreq_val = clip_freq;
290 notify_device = cpufreq_device;
291 289
292 for_each_cpu(cpuid, mask) { 290 for_each_cpu(cpuid, mask) {
293 if (is_cpufreq_valid(cpuid)) 291 if (is_cpufreq_valid(cpuid))
294 cpufreq_update_policy(cpuid); 292 cpufreq_update_policy(cpuid);
295 } 293 }
296 294
297 notify_device = NOTIFY_INVALID;
298
299 return 0; 295 return 0;
300} 296}
301 297
@@ -316,21 +312,28 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
316{ 312{
317 struct cpufreq_policy *policy = data; 313 struct cpufreq_policy *policy = data;
318 unsigned long max_freq = 0; 314 unsigned long max_freq = 0;
315 struct cpufreq_cooling_device *cpufreq_dev;
319 316
320 if (event != CPUFREQ_ADJUST || notify_device == NOTIFY_INVALID) 317 if (event != CPUFREQ_ADJUST)
321 return 0; 318 return 0;
322 319
323 if (cpumask_test_cpu(policy->cpu, &notify_device->allowed_cpus)) 320 mutex_lock(&cooling_cpufreq_lock);
324 max_freq = notify_device->cpufreq_val; 321 list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
325 else 322 if (!cpumask_test_cpu(policy->cpu,
326 return 0; 323 &cpufreq_dev->allowed_cpus))
324 continue;
325
326 if (!cpufreq_dev->cpufreq_val)
327 cpufreq_dev->cpufreq_val = get_cpu_frequency(
328 cpumask_any(&cpufreq_dev->allowed_cpus),
329 cpufreq_dev->cpufreq_state);
327 330
328 /* Never exceed user_policy.max */ 331 max_freq = cpufreq_dev->cpufreq_val;
329 if (max_freq > policy->user_policy.max)
330 max_freq = policy->user_policy.max;
331 332
332 if (policy->max != max_freq) 333 if (policy->max != max_freq)
333 cpufreq_verify_within_limits(policy, 0, max_freq); 334 cpufreq_verify_within_limits(policy, 0, max_freq);
335 }
336 mutex_unlock(&cooling_cpufreq_lock);
334 337
335 return 0; 338 return 0;
336} 339}
@@ -486,6 +489,7 @@ __cpufreq_cooling_register(struct device_node *np,
486 cpufreq_register_notifier(&thermal_cpufreq_notifier_block, 489 cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
487 CPUFREQ_POLICY_NOTIFIER); 490 CPUFREQ_POLICY_NOTIFIER);
488 cpufreq_dev_count++; 491 cpufreq_dev_count++;
492 list_add(&cpufreq_dev->node, &cpufreq_dev_list);
489 493
490 mutex_unlock(&cooling_cpufreq_lock); 494 mutex_unlock(&cooling_cpufreq_lock);
491 495
@@ -549,6 +553,7 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
549 553
550 cpufreq_dev = cdev->devdata; 554 cpufreq_dev = cdev->devdata;
551 mutex_lock(&cooling_cpufreq_lock); 555 mutex_lock(&cooling_cpufreq_lock);
556 list_del(&cpufreq_dev->node);
552 cpufreq_dev_count--; 557 cpufreq_dev_count--;
553 558
554 /* Unregister the notifier for the last cpufreq cooling device */ 559 /* Unregister the notifier for the last cpufreq cooling device */
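
The cpu_cooling.c rework replaces the single global notify_device pointer, which could describe only one cooling device at a time and was cleared again right after cpufreq_update_policy(), with a cpufreq_dev_list that the CPUFREQ_ADJUST notifier walks under cooling_cpufreq_lock, clamping the policy of any CPU covered by a registered device. A user-space model of that registered-list-plus-notifier pattern; it uses a pthread mutex and a hand-rolled singly linked list in place of the kernel primitives, and it omits the lazy cpufreq_val initialization shown in the hunk.

#include <pthread.h>
#include <stdio.h>

/* Toy cooling device; only the fields the notifier needs. */
struct cooling_dev {
	int cpu;				/* CPU this device throttles */
	unsigned long clipped_freq;		/* current frequency cap */
	struct cooling_dev *next;
};

static pthread_mutex_t cooling_lock = PTHREAD_MUTEX_INITIALIZER;
static struct cooling_dev *dev_list;		/* cpufreq_dev_list analogue */

static void register_dev(struct cooling_dev *d)
{
	pthread_mutex_lock(&cooling_lock);
	d->next = dev_list;
	dev_list = d;
	pthread_mutex_unlock(&cooling_lock);
}

/* Notifier analogue: clamp the policy for whichever device covers this CPU. */
static unsigned long policy_adjust(int cpu, unsigned long requested_max)
{
	unsigned long max = requested_max;

	pthread_mutex_lock(&cooling_lock);
	for (struct cooling_dev *d = dev_list; d; d = d->next) {
		if (d->cpu != cpu)
			continue;
		if (d->clipped_freq < max)
			max = d->clipped_freq;
	}
	pthread_mutex_unlock(&cooling_lock);
	return max;
}

int main(void)
{
	struct cooling_dev a = { .cpu = 0, .clipped_freq = 1200000 };
	struct cooling_dev b = { .cpu = 1, .clipped_freq = 900000 };

	register_dev(&a);
	register_dev(&b);
	printf("cpu0 max: %lu\n", policy_adjust(0, 2000000));	/* clipped to 1200000 */
	printf("cpu1 max: %lu\n", policy_adjust(1, 2000000));	/* clipped to 900000 */
	return 0;
}

Keeping the state per device on a locked list is what lets several cooling devices coexist without the notifier racing against whichever one last set the global pointer.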
diff --git a/drivers/thermal/samsung/exynos_thermal_common.c b/drivers/thermal/samsung/exynos_thermal_common.c
index 3f5ad25ddca8..b6be572704a4 100644
--- a/drivers/thermal/samsung/exynos_thermal_common.c
+++ b/drivers/thermal/samsung/exynos_thermal_common.c
@@ -417,13 +417,10 @@ void exynos_unregister_thermal(struct thermal_sensor_conf *sensor_conf)
417 417
418 th_zone = sensor_conf->pzone_data; 418 th_zone = sensor_conf->pzone_data;
419 419
420 if (th_zone->therm_dev) 420 thermal_zone_device_unregister(th_zone->therm_dev);
421 thermal_zone_device_unregister(th_zone->therm_dev);
422 421
423 for (i = 0; i < th_zone->cool_dev_size; i++) { 422 for (i = 0; i < th_zone->cool_dev_size; ++i)
424 if (th_zone->cool_dev[i]) 423 cpufreq_cooling_unregister(th_zone->cool_dev[i]);
425 cpufreq_cooling_unregister(th_zone->cool_dev[i]);
426 }
427 424
428 dev_info(sensor_conf->dev, 425 dev_info(sensor_conf->dev,
429 "Exynos: Kernel Thermal management unregistered\n"); 426 "Exynos: Kernel Thermal management unregistered\n");
diff --git a/drivers/thermal/st/st_thermal.c b/drivers/thermal/st/st_thermal.c
index 90163b384660..d1ec5804c0bb 100644
--- a/drivers/thermal/st/st_thermal.c
+++ b/drivers/thermal/st/st_thermal.c
@@ -275,6 +275,7 @@ int st_thermal_unregister(struct platform_device *pdev)
275} 275}
276EXPORT_SYMBOL_GPL(st_thermal_unregister); 276EXPORT_SYMBOL_GPL(st_thermal_unregister);
277 277
278#ifdef CONFIG_PM_SLEEP
278static int st_thermal_suspend(struct device *dev) 279static int st_thermal_suspend(struct device *dev)
279{ 280{
280 struct platform_device *pdev = to_platform_device(dev); 281 struct platform_device *pdev = to_platform_device(dev);
@@ -305,6 +306,8 @@ static int st_thermal_resume(struct device *dev)
305 306
306 return 0; 307 return 0;
307} 308}
309#endif
310
308SIMPLE_DEV_PM_OPS(st_thermal_pm_ops, st_thermal_suspend, st_thermal_resume); 311SIMPLE_DEV_PM_OPS(st_thermal_pm_ops, st_thermal_suspend, st_thermal_resume);
309EXPORT_SYMBOL_GPL(st_thermal_pm_ops); 312EXPORT_SYMBOL_GPL(st_thermal_pm_ops);
310 313
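
The st_thermal hunk wraps the suspend/resume callbacks in #ifdef CONFIG_PM_SLEEP, presumably because SIMPLE_DEV_PM_OPS() only references them when sleep support is configured and the bare definitions would otherwise trigger unused-function warnings. A minimal stand-alone illustration of the same guard pattern; HAVE_SLEEP, struct pm_ops and MY_PM_OPS are invented for the example and do not correspond to kernel macros.

#include <stdio.h>

#define HAVE_SLEEP 1	/* stand-in for CONFIG_PM_SLEEP; set to 0 to see the other branch */

struct pm_ops {
	int (*suspend)(void);
	int (*resume)(void);
};

#if HAVE_SLEEP
static int my_suspend(void) { printf("suspend\n"); return 0; }
static int my_resume(void)  { printf("resume\n");  return 0; }
#define MY_PM_OPS { my_suspend, my_resume }
#else
/* Without sleep support the callbacks are not compiled at all, so there is
 * no "defined but not used" warning - the point of the CONFIG_PM_SLEEP guard. */
#define MY_PM_OPS { 0, 0 }
#endif

static const struct pm_ops ops = MY_PM_OPS;

int main(void)
{
	if (ops.suspend)
		ops.suspend();
	if (ops.resume)
		ops.resume();
	return 0;
}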
diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c
index 56982da4a9e9..bf355050eab6 100644
--- a/drivers/tty/serial/of_serial.c
+++ b/drivers/tty/serial/of_serial.c
@@ -240,32 +240,6 @@ static int of_platform_serial_remove(struct platform_device *ofdev)
240 return 0; 240 return 0;
241} 241}
242 242
243#ifdef CONFIG_PM_SLEEP
244static int of_serial_suspend(struct device *dev)
245{
246 struct of_serial_info *info = dev_get_drvdata(dev);
247
248 serial8250_suspend_port(info->line);
249 if (info->clk)
250 clk_disable_unprepare(info->clk);
251
252 return 0;
253}
254
255static int of_serial_resume(struct device *dev)
256{
257 struct of_serial_info *info = dev_get_drvdata(dev);
258
259 if (info->clk)
260 clk_prepare_enable(info->clk);
261
262 serial8250_resume_port(info->line);
263
264 return 0;
265}
266#endif
267static SIMPLE_DEV_PM_OPS(of_serial_pm_ops, of_serial_suspend, of_serial_resume);
268
269/* 243/*
270 * A few common types, add more as needed. 244 * A few common types, add more as needed.
271 */ 245 */
@@ -297,7 +271,6 @@ static struct platform_driver of_platform_serial_driver = {
297 .name = "of_serial", 271 .name = "of_serial",
298 .owner = THIS_MODULE, 272 .owner = THIS_MODULE,
299 .of_match_table = of_platform_serial_table, 273 .of_match_table = of_platform_serial_table,
300 .pm = &of_serial_pm_ops,
301 }, 274 },
302 .probe = of_platform_serial_probe, 275 .probe = of_platform_serial_probe,
303 .remove = of_platform_serial_remove, 276 .remove = of_platform_serial_remove,
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 39b4081b632d..96fafed92b76 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -44,6 +44,9 @@ static const struct usb_device_id usb_quirk_list[] = {
44 /* Creative SB Audigy 2 NX */ 44 /* Creative SB Audigy 2 NX */
45 { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME }, 45 { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
46 46
47 /* Microsoft Wireless Laser Mouse 6000 Receiver */
48 { USB_DEVICE(0x045e, 0x00e1), .driver_info = USB_QUIRK_RESET_RESUME },
49
47 /* Microsoft LifeCam-VX700 v2.0 */ 50 /* Microsoft LifeCam-VX700 v2.0 */
48 { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME }, 51 { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
49 52
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 711b23019d54..df38e7ef4976 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -791,6 +791,10 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
791 791
792 trb = dwc->ep0_trb; 792 trb = dwc->ep0_trb;
793 793
794 r = next_request(&ep0->request_list);
795 if (!r)
796 return;
797
794 status = DWC3_TRB_SIZE_TRBSTS(trb->size); 798 status = DWC3_TRB_SIZE_TRBSTS(trb->size);
795 if (status == DWC3_TRBSTS_SETUP_PENDING) { 799 if (status == DWC3_TRBSTS_SETUP_PENDING) {
796 dwc3_trace(trace_dwc3_ep0, "Setup Pending received"); 800 dwc3_trace(trace_dwc3_ep0, "Setup Pending received");
@@ -801,10 +805,6 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
801 return; 805 return;
802 } 806 }
803 807
804 r = next_request(&ep0->request_list);
805 if (!r)
806 return;
807
808 ur = &r->request; 808 ur = &r->request;
809 809
810 length = trb->size & DWC3_TRB_SIZE_MASK; 810 length = trb->size & DWC3_TRB_SIZE_MASK;
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 696160d48ae8..388cfd83b6b6 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -22,7 +22,6 @@
22 22
23 23
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/device.h>
26#include <asm/unaligned.h> 25#include <asm/unaligned.h>
27 26
28#include "xhci.h" 27#include "xhci.h"
@@ -1149,9 +1148,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
1149 * including the USB 3.0 roothub, but only if CONFIG_PM_RUNTIME 1148 * including the USB 3.0 roothub, but only if CONFIG_PM_RUNTIME
1150 * is enabled, so also enable remote wake here. 1149 * is enabled, so also enable remote wake here.
1151 */ 1150 */
1152 if (hcd->self.root_hub->do_remote_wakeup 1151 if (hcd->self.root_hub->do_remote_wakeup) {
1153 && device_may_wakeup(hcd->self.controller)) {
1154
1155 if (t1 & PORT_CONNECT) { 1152 if (t1 & PORT_CONNECT) {
1156 t2 |= PORT_WKOC_E | PORT_WKDISC_E; 1153 t2 |= PORT_WKOC_E | PORT_WKDISC_E;
1157 t2 &= ~PORT_WKCONN_E; 1154 t2 &= ~PORT_WKCONN_E;
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 9a69b1f1b300..142b601f9563 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -281,7 +281,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
281 if (xhci->quirks & XHCI_COMP_MODE_QUIRK) 281 if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
282 pdev->no_d3cold = true; 282 pdev->no_d3cold = true;
283 283
284 return xhci_suspend(xhci); 284 return xhci_suspend(xhci, do_wakeup);
285} 285}
286 286
287static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated) 287static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 3d78b0cd674b..646300cbe5f7 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -204,7 +204,15 @@ static int xhci_plat_suspend(struct device *dev)
204 struct usb_hcd *hcd = dev_get_drvdata(dev); 204 struct usb_hcd *hcd = dev_get_drvdata(dev);
205 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 205 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
206 206
207 return xhci_suspend(xhci); 207 /*
208 * xhci_suspend() needs `do_wakeup` to know whether host is allowed
209 * to do wakeup during suspend. Since xhci_plat_suspend is currently
210 * only designed for system suspend, device_may_wakeup() is enough
 211 * to determine whether host is allowed to do wakeup. Need to
212 * reconsider this when xhci_plat_suspend enlarges its scope, e.g.,
213 * also applies to runtime suspend.
214 */
215 return xhci_suspend(xhci, device_may_wakeup(dev));
208} 216}
209 217
210static int xhci_plat_resume(struct device *dev) 218static int xhci_plat_resume(struct device *dev)

diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index bc6fcbc16f61..06433aec81d7 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1067,9 +1067,8 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
1067 false); 1067 false);
1068 xhci_ring_cmd_db(xhci); 1068 xhci_ring_cmd_db(xhci);
1069 } else { 1069 } else {
1070 /* Clear our internal halted state and restart the ring(s) */ 1070 /* Clear our internal halted state */
1071 xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED; 1071 xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
1072 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1073 } 1072 }
1074} 1073}
1075 1074
@@ -1823,22 +1822,13 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
1823 ep->stopped_td = td; 1822 ep->stopped_td = td;
1824 return 0; 1823 return 0;
1825 } else { 1824 } else {
1826 if (trb_comp_code == COMP_STALL) { 1825 if (trb_comp_code == COMP_STALL ||
1827 /* The transfer is completed from the driver's 1826 xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
1828 * perspective, but we need to issue a set dequeue 1827 trb_comp_code)) {
1829 * command for this stalled endpoint to move the dequeue 1828 /* Issue a reset endpoint command to clear the host side
1830 * pointer past the TD. We can't do that here because 1829 * halt, followed by a set dequeue command to move the
1831 * the halt condition must be cleared first. Let the 1830 * dequeue pointer past the TD.
1832 * USB class driver clear the stall later. 1831 * The class driver clears the device side halt later.
1833 */
1834 ep->stopped_td = td;
1835 ep->stopped_stream = ep_ring->stream_id;
1836 } else if (xhci_requires_manual_halt_cleanup(xhci,
1837 ep_ctx, trb_comp_code)) {
1838 /* Other types of errors halt the endpoint, but the
1839 * class driver doesn't call usb_reset_endpoint() unless
1840 * the error is -EPIPE. Clear the halted status in the
1841 * xHCI hardware manually.
1842 */ 1832 */
1843 xhci_cleanup_halted_endpoint(xhci, 1833 xhci_cleanup_halted_endpoint(xhci,
1844 slot_id, ep_index, ep_ring->stream_id, 1834 slot_id, ep_index, ep_ring->stream_id,
@@ -1958,9 +1948,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
1958 else 1948 else
1959 td->urb->actual_length = 0; 1949 td->urb->actual_length = 0;
1960 1950
1961 xhci_cleanup_halted_endpoint(xhci, 1951 return finish_td(xhci, td, event_trb, event, ep, status, false);
1962 slot_id, ep_index, 0, td, event_trb);
1963 return finish_td(xhci, td, event_trb, event, ep, status, true);
1964 } 1952 }
1965 /* 1953 /*
1966 * Did we transfer any data, despite the errors that might have 1954 * Did we transfer any data, despite the errors that might have
@@ -2519,17 +2507,8 @@ cleanup:
2519 if (ret) { 2507 if (ret) {
2520 urb = td->urb; 2508 urb = td->urb;
2521 urb_priv = urb->hcpriv; 2509 urb_priv = urb->hcpriv;
2522 /* Leave the TD around for the reset endpoint function 2510
2523 * to use(but only if it's not a control endpoint, 2511 xhci_urb_free_priv(xhci, urb_priv);
2524 * since we already queued the Set TR dequeue pointer
2525 * command for stalled control endpoints).
2526 */
2527 if (usb_endpoint_xfer_control(&urb->ep->desc) ||
2528 (trb_comp_code != COMP_STALL &&
2529 trb_comp_code != COMP_BABBLE))
2530 xhci_urb_free_priv(xhci, urb_priv);
2531 else
2532 kfree(urb_priv);
2533 2512
2534 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb); 2513 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
2535 if ((urb->actual_length != urb->transfer_buffer_length && 2514 if ((urb->actual_length != urb->transfer_buffer_length &&
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 2a5d45b4cb15..033b46c470bd 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -35,6 +35,8 @@
35#define DRIVER_AUTHOR "Sarah Sharp" 35#define DRIVER_AUTHOR "Sarah Sharp"
36#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver" 36#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
37 37
38#define PORT_WAKE_BITS (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)
39
38/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */ 40/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
39static int link_quirk; 41static int link_quirk;
40module_param(link_quirk, int, S_IRUGO | S_IWUSR); 42module_param(link_quirk, int, S_IRUGO | S_IWUSR);
@@ -851,13 +853,47 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
851 xhci_set_cmd_ring_deq(xhci); 853 xhci_set_cmd_ring_deq(xhci);
852} 854}
853 855
856static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
857{
858 int port_index;
859 __le32 __iomem **port_array;
860 unsigned long flags;
861 u32 t1, t2;
862
863 spin_lock_irqsave(&xhci->lock, flags);
864
865	/* disable usb3 ports Wake bits */
866 port_index = xhci->num_usb3_ports;
867 port_array = xhci->usb3_ports;
868 while (port_index--) {
869 t1 = readl(port_array[port_index]);
870 t1 = xhci_port_state_to_neutral(t1);
871 t2 = t1 & ~PORT_WAKE_BITS;
872 if (t1 != t2)
873 writel(t2, port_array[port_index]);
874 }
875
876	/* disable usb2 ports Wake bits */
877 port_index = xhci->num_usb2_ports;
878 port_array = xhci->usb2_ports;
879 while (port_index--) {
880 t1 = readl(port_array[port_index]);
881 t1 = xhci_port_state_to_neutral(t1);
882 t2 = t1 & ~PORT_WAKE_BITS;
883 if (t1 != t2)
884 writel(t2, port_array[port_index]);
885 }
886
887 spin_unlock_irqrestore(&xhci->lock, flags);
888}
889
854/* 890/*
855 * Stop HC (not bus-specific) 891 * Stop HC (not bus-specific)
856 * 892 *
857 * This is called when the machine transition into S3/S4 mode. 893 * This is called when the machine transition into S3/S4 mode.
858 * 894 *
859 */ 895 */
860int xhci_suspend(struct xhci_hcd *xhci) 896int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
861{ 897{
862 int rc = 0; 898 int rc = 0;
863 unsigned int delay = XHCI_MAX_HALT_USEC; 899 unsigned int delay = XHCI_MAX_HALT_USEC;
@@ -868,6 +904,10 @@ int xhci_suspend(struct xhci_hcd *xhci)
868 xhci->shared_hcd->state != HC_STATE_SUSPENDED) 904 xhci->shared_hcd->state != HC_STATE_SUSPENDED)
869 return -EINVAL; 905 return -EINVAL;
870 906
907 /* Clear root port wake on bits if wakeup not allowed. */
908 if (!do_wakeup)
909 xhci_disable_port_wake_on_bits(xhci);
910
871 /* Don't poll the roothubs on bus suspend. */ 911 /* Don't poll the roothubs on bus suspend. */
872 xhci_dbg(xhci, "%s: stopping port polling.\n", __func__); 912 xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
873 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); 913 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
@@ -2912,68 +2952,33 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
2912 } 2952 }
2913} 2953}
2914 2954
2915/* Deal with stalled endpoints. The core should have sent the control 2955/* Called when clearing a halted device. The core should have sent the control
2916 * to clear the halt condition. However, we need to make the xHCI hardware 2956 * message to clear the device halt condition. The host side of the halt should
2917 * reset its sequence number, since a device will expect a sequence number of 2957 * already be cleared with a reset endpoint command issued when the STALL tx
2918 * zero after the halt condition is cleared. 2958 * event was received.
2959 *
2919 * Context: in_interrupt 2960 * Context: in_interrupt
2920 */ 2961 */
2962
2921void xhci_endpoint_reset(struct usb_hcd *hcd, 2963void xhci_endpoint_reset(struct usb_hcd *hcd,
2922 struct usb_host_endpoint *ep) 2964 struct usb_host_endpoint *ep)
2923{ 2965{
2924 struct xhci_hcd *xhci; 2966 struct xhci_hcd *xhci;
2925 struct usb_device *udev;
2926 unsigned int ep_index;
2927 unsigned long flags;
2928 int ret;
2929 struct xhci_virt_ep *virt_ep;
2930 struct xhci_command *command;
2931 2967
2932 xhci = hcd_to_xhci(hcd); 2968 xhci = hcd_to_xhci(hcd);
2933 udev = (struct usb_device *) ep->hcpriv;
2934 /* Called with a root hub endpoint (or an endpoint that wasn't added
2935 * with xhci_add_endpoint()
2936 */
2937 if (!ep->hcpriv)
2938 return;
2939 ep_index = xhci_get_endpoint_index(&ep->desc);
2940 virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
2941 if (!virt_ep->stopped_td) {
2942 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
2943 "Endpoint 0x%x not halted, refusing to reset.",
2944 ep->desc.bEndpointAddress);
2945 return;
2946 }
2947 if (usb_endpoint_xfer_control(&ep->desc)) {
2948 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
2949 "Control endpoint stall already handled.");
2950 return;
2951 }
2952 2969
2953 command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
2954 if (!command)
2955 return;
2956
2957 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
2958 "Queueing reset endpoint command");
2959 spin_lock_irqsave(&xhci->lock, flags);
2960 ret = xhci_queue_reset_ep(xhci, command, udev->slot_id, ep_index);
2961 /* 2970 /*
2962 * Can't change the ring dequeue pointer until it's transitioned to the 2971 * We might need to implement the config ep cmd in xhci 4.8.1 note:
2963 * stopped state, which is only upon a successful reset endpoint 2973 * Halted state. If software wishes to reset the Data Toggle or Sequence
2964 * command. Better hope that last command worked! 2973 * Halted state. If software wishes reset the Data Toggle or Sequence
2974 * Number of an endpoint that isn't in the Halted state, then software
2975 * may issue a Configure Endpoint Command with the Drop and Add bits set
 2976 * for the target endpoint that is in the Stopped state.
2965 */ 2977 */
2966 if (!ret) {
2967 xhci_cleanup_stalled_ring(xhci, udev, ep_index);
2968 kfree(virt_ep->stopped_td);
2969 xhci_ring_cmd_db(xhci);
2970 }
2971 virt_ep->stopped_td = NULL;
2972 virt_ep->stopped_stream = 0;
2973 spin_unlock_irqrestore(&xhci->lock, flags);
2974 2978
2975 if (ret) 2979 /* For now just print debug to follow the situation */
2976 xhci_warn(xhci, "FIXME allocate a new ring segment\n"); 2980 xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n",
2981 ep->desc.bEndpointAddress);
2977} 2982}
2978 2983
2979static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, 2984static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
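
xhci_suspend() now takes a do_wakeup argument, and when wakeup is not allowed it calls the new xhci_disable_port_wake_on_bits() helper, which walks both root-hub port arrays and clears the wake-on bits with a read-modify-write that only writes back when a bit actually changes. A user-space model of that loop over an array of fake port registers; the bit positions are illustrative, and the sketch omits the xhci_port_state_to_neutral() step that keeps write-1-to-clear status bits from being re-triggered.

#include <stdint.h>
#include <stdio.h>

/* Illustrative wake-on bits; real xHCI PORTSC layouts differ. */
#define PORT_WKOC_E   (1u << 25)
#define PORT_WKDISC_E (1u << 26)
#define PORT_WKCONN_E (1u << 27)
#define PORT_WAKE_BITS (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)

static void disable_port_wake_bits(uint32_t *ports, int nports)
{
	while (nports--) {
		uint32_t t1 = ports[nports];
		uint32_t t2 = t1 & ~PORT_WAKE_BITS;

		/* Write back only if a wake bit was actually set. */
		if (t1 != t2) {
			ports[nports] = t2;
			printf("port %d: %08x -> %08x\n", nports,
			       (unsigned int)t1, (unsigned int)t2);
		}
	}
}

int main(void)
{
	uint32_t ports[3] = { 0x00000203, PORT_WKDISC_E | 0x1, PORT_WKCONN_E };
	int do_wakeup = 0;			/* as if device_may_wakeup() said no */

	if (!do_wakeup)
		disable_port_wake_bits(ports, 3);
	return 0;
}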
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index df76d642e719..d745715a1e2f 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1746,7 +1746,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks);
1746void xhci_init_driver(struct hc_driver *drv, int (*setup_fn)(struct usb_hcd *)); 1746void xhci_init_driver(struct hc_driver *drv, int (*setup_fn)(struct usb_hcd *));
1747 1747
1748#ifdef CONFIG_PM 1748#ifdef CONFIG_PM
1749int xhci_suspend(struct xhci_hcd *xhci); 1749int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup);
1750int xhci_resume(struct xhci_hcd *xhci, bool hibernated); 1750int xhci_resume(struct xhci_hcd *xhci, bool hibernated);
1751#else 1751#else
1752#define xhci_suspend NULL 1752#define xhci_suspend NULL
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index cfd009dc4018..6c4eb3cf5efd 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -120,6 +120,7 @@ static const struct usb_device_id id_table[] = {
120 { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ 120 { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
121 { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */ 121 { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
122 { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */ 122 { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
123 { USB_DEVICE(0x10C4, 0x8875) }, /* CEL MeshConnect USB Stick */
123 { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ 124 { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
124 { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ 125 { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
125 { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ 126 { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 0dad8ce5a609..1ebb351b9e9a 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -470,6 +470,39 @@ static const struct usb_device_id id_table_combined[] = {
470 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FD_PID) }, 470 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FD_PID) },
471 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FE_PID) }, 471 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FE_PID) },
472 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FF_PID) }, 472 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FF_PID) },
473 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_4701_PID) },
474 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9300_PID) },
475 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9301_PID) },
476 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9302_PID) },
477 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9303_PID) },
478 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9304_PID) },
479 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9305_PID) },
480 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9306_PID) },
481 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9307_PID) },
482 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9308_PID) },
483 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9309_PID) },
484 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930A_PID) },
485 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930B_PID) },
486 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930C_PID) },
487 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930D_PID) },
488 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930E_PID) },
489 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930F_PID) },
490 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9310_PID) },
491 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9311_PID) },
492 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9312_PID) },
493 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9313_PID) },
494 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9314_PID) },
495 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9315_PID) },
496 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9316_PID) },
497 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9317_PID) },
498 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9318_PID) },
499 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9319_PID) },
500 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931A_PID) },
501 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931B_PID) },
502 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931C_PID) },
503 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931D_PID) },
504 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931E_PID) },
505 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931F_PID) },
473 { USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) }, 506 { USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) },
474 { USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) }, 507 { USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) },
475 { USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) }, 508 { USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 6786b705ccf6..e52409c9be99 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -926,8 +926,8 @@
926#define BAYER_CONTOUR_CABLE_PID 0x6001 926#define BAYER_CONTOUR_CABLE_PID 0x6001
927 927
928/* 928/*
929 * The following are the values for the Matrix Orbital FTDI Range 929 * Matrix Orbital Intelligent USB displays.
930 * Anything in this range will use an FT232RL. 930 * http://www.matrixorbital.com
931 */ 931 */
932#define MTXORB_VID 0x1B3D 932#define MTXORB_VID 0x1B3D
933#define MTXORB_FTDI_RANGE_0100_PID 0x0100 933#define MTXORB_FTDI_RANGE_0100_PID 0x0100
@@ -1186,8 +1186,39 @@
1186#define MTXORB_FTDI_RANGE_01FD_PID 0x01FD 1186#define MTXORB_FTDI_RANGE_01FD_PID 0x01FD
1187#define MTXORB_FTDI_RANGE_01FE_PID 0x01FE 1187#define MTXORB_FTDI_RANGE_01FE_PID 0x01FE
1188#define MTXORB_FTDI_RANGE_01FF_PID 0x01FF 1188#define MTXORB_FTDI_RANGE_01FF_PID 0x01FF
1189 1189#define MTXORB_FTDI_RANGE_4701_PID 0x4701
1190 1190#define MTXORB_FTDI_RANGE_9300_PID 0x9300
1191#define MTXORB_FTDI_RANGE_9301_PID 0x9301
1192#define MTXORB_FTDI_RANGE_9302_PID 0x9302
1193#define MTXORB_FTDI_RANGE_9303_PID 0x9303
1194#define MTXORB_FTDI_RANGE_9304_PID 0x9304
1195#define MTXORB_FTDI_RANGE_9305_PID 0x9305
1196#define MTXORB_FTDI_RANGE_9306_PID 0x9306
1197#define MTXORB_FTDI_RANGE_9307_PID 0x9307
1198#define MTXORB_FTDI_RANGE_9308_PID 0x9308
1199#define MTXORB_FTDI_RANGE_9309_PID 0x9309
1200#define MTXORB_FTDI_RANGE_930A_PID 0x930A
1201#define MTXORB_FTDI_RANGE_930B_PID 0x930B
1202#define MTXORB_FTDI_RANGE_930C_PID 0x930C
1203#define MTXORB_FTDI_RANGE_930D_PID 0x930D
1204#define MTXORB_FTDI_RANGE_930E_PID 0x930E
1205#define MTXORB_FTDI_RANGE_930F_PID 0x930F
1206#define MTXORB_FTDI_RANGE_9310_PID 0x9310
1207#define MTXORB_FTDI_RANGE_9311_PID 0x9311
1208#define MTXORB_FTDI_RANGE_9312_PID 0x9312
1209#define MTXORB_FTDI_RANGE_9313_PID 0x9313
1210#define MTXORB_FTDI_RANGE_9314_PID 0x9314
1211#define MTXORB_FTDI_RANGE_9315_PID 0x9315
1212#define MTXORB_FTDI_RANGE_9316_PID 0x9316
1213#define MTXORB_FTDI_RANGE_9317_PID 0x9317
1214#define MTXORB_FTDI_RANGE_9318_PID 0x9318
1215#define MTXORB_FTDI_RANGE_9319_PID 0x9319
1216#define MTXORB_FTDI_RANGE_931A_PID 0x931A
1217#define MTXORB_FTDI_RANGE_931B_PID 0x931B
1218#define MTXORB_FTDI_RANGE_931C_PID 0x931C
1219#define MTXORB_FTDI_RANGE_931D_PID 0x931D
1220#define MTXORB_FTDI_RANGE_931E_PID 0x931E
1221#define MTXORB_FTDI_RANGE_931F_PID 0x931F
1191 1222
1192/* 1223/*
1193 * The Mobility Lab (TML) 1224 * The Mobility Lab (TML)
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 93cb7cebda62..077c714f1285 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -311,24 +311,30 @@ static void usa26_indat_callback(struct urb *urb)
311 if ((data[0] & 0x80) == 0) { 311 if ((data[0] & 0x80) == 0) {
312 /* no errors on individual bytes, only 312 /* no errors on individual bytes, only
313 possible overrun err */ 313 possible overrun err */
314 if (data[0] & RXERROR_OVERRUN) 314 if (data[0] & RXERROR_OVERRUN) {
315 err = TTY_OVERRUN; 315 tty_insert_flip_char(&port->port, 0,
316 else 316 TTY_OVERRUN);
317 err = 0; 317 }
318 for (i = 1; i < urb->actual_length ; ++i) 318 for (i = 1; i < urb->actual_length ; ++i)
319 tty_insert_flip_char(&port->port, data[i], err); 319 tty_insert_flip_char(&port->port, data[i],
320 TTY_NORMAL);
320 } else { 321 } else {
321 /* some bytes had errors, every byte has status */ 322 /* some bytes had errors, every byte has status */
322 dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__); 323 dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__);
323 for (i = 0; i + 1 < urb->actual_length; i += 2) { 324 for (i = 0; i + 1 < urb->actual_length; i += 2) {
324 int stat = data[i], flag = 0; 325 int stat = data[i];
325 if (stat & RXERROR_OVERRUN) 326 int flag = TTY_NORMAL;
326 flag |= TTY_OVERRUN; 327
327 if (stat & RXERROR_FRAMING) 328 if (stat & RXERROR_OVERRUN) {
328 flag |= TTY_FRAME; 329 tty_insert_flip_char(&port->port, 0,
329 if (stat & RXERROR_PARITY) 330 TTY_OVERRUN);
330 flag |= TTY_PARITY; 331 }
331 /* XXX should handle break (0x10) */ 332 /* XXX should handle break (0x10) */
333 if (stat & RXERROR_PARITY)
334 flag = TTY_PARITY;
335 else if (stat & RXERROR_FRAMING)
336 flag = TTY_FRAME;
337
332 tty_insert_flip_char(&port->port, data[i+1], 338 tty_insert_flip_char(&port->port, data[i+1],
333 flag); 339 flag);
334 } 340 }
@@ -649,14 +655,19 @@ static void usa49_indat_callback(struct urb *urb)
649 } else { 655 } else {
650 /* some bytes had errors, every byte has status */ 656 /* some bytes had errors, every byte has status */
651 for (i = 0; i + 1 < urb->actual_length; i += 2) { 657 for (i = 0; i + 1 < urb->actual_length; i += 2) {
652 int stat = data[i], flag = 0; 658 int stat = data[i];
653 if (stat & RXERROR_OVERRUN) 659 int flag = TTY_NORMAL;
654 flag |= TTY_OVERRUN; 660
655 if (stat & RXERROR_FRAMING) 661 if (stat & RXERROR_OVERRUN) {
656 flag |= TTY_FRAME; 662 tty_insert_flip_char(&port->port, 0,
657 if (stat & RXERROR_PARITY) 663 TTY_OVERRUN);
658 flag |= TTY_PARITY; 664 }
659 /* XXX should handle break (0x10) */ 665 /* XXX should handle break (0x10) */
666 if (stat & RXERROR_PARITY)
667 flag = TTY_PARITY;
668 else if (stat & RXERROR_FRAMING)
669 flag = TTY_FRAME;
670
660 tty_insert_flip_char(&port->port, data[i+1], 671 tty_insert_flip_char(&port->port, data[i+1],
661 flag); 672 flag);
662 } 673 }
@@ -713,15 +724,19 @@ static void usa49wg_indat_callback(struct urb *urb)
713 */ 724 */
714 for (x = 0; x + 1 < len && 725 for (x = 0; x + 1 < len &&
715 i + 1 < urb->actual_length; x += 2) { 726 i + 1 < urb->actual_length; x += 2) {
716 int stat = data[i], flag = 0; 727 int stat = data[i];
728 int flag = TTY_NORMAL;
717 729
718 if (stat & RXERROR_OVERRUN) 730 if (stat & RXERROR_OVERRUN) {
719 flag |= TTY_OVERRUN; 731 tty_insert_flip_char(&port->port, 0,
720 if (stat & RXERROR_FRAMING) 732 TTY_OVERRUN);
721 flag |= TTY_FRAME; 733 }
722 if (stat & RXERROR_PARITY)
723 flag |= TTY_PARITY;
724 /* XXX should handle break (0x10) */ 734 /* XXX should handle break (0x10) */
735 if (stat & RXERROR_PARITY)
736 flag = TTY_PARITY;
737 else if (stat & RXERROR_FRAMING)
738 flag = TTY_FRAME;
739
725 tty_insert_flip_char(&port->port, data[i+1], 740 tty_insert_flip_char(&port->port, data[i+1],
726 flag); 741 flag);
727 i += 2; 742 i += 2;
@@ -773,25 +788,31 @@ static void usa90_indat_callback(struct urb *urb)
773 if ((data[0] & 0x80) == 0) { 788 if ((data[0] & 0x80) == 0) {
774 /* no errors on individual bytes, only 789 /* no errors on individual bytes, only
775 possible overrun err*/ 790 possible overrun err*/
776 if (data[0] & RXERROR_OVERRUN) 791 if (data[0] & RXERROR_OVERRUN) {
777 err = TTY_OVERRUN; 792 tty_insert_flip_char(&port->port, 0,
778 else 793 TTY_OVERRUN);
779 err = 0; 794 }
780 for (i = 1; i < urb->actual_length ; ++i) 795 for (i = 1; i < urb->actual_length ; ++i)
781 tty_insert_flip_char(&port->port, 796 tty_insert_flip_char(&port->port,
782 data[i], err); 797 data[i], TTY_NORMAL);
783 } else { 798 } else {
784 /* some bytes had errors, every byte has status */ 799 /* some bytes had errors, every byte has status */
785 dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__); 800 dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__);
786 for (i = 0; i + 1 < urb->actual_length; i += 2) { 801 for (i = 0; i + 1 < urb->actual_length; i += 2) {
787 int stat = data[i], flag = 0; 802 int stat = data[i];
788 if (stat & RXERROR_OVERRUN) 803 int flag = TTY_NORMAL;
789 flag |= TTY_OVERRUN; 804
790 if (stat & RXERROR_FRAMING) 805 if (stat & RXERROR_OVERRUN) {
791 flag |= TTY_FRAME; 806 tty_insert_flip_char(
792 if (stat & RXERROR_PARITY) 807 &port->port, 0,
793 flag |= TTY_PARITY; 808 TTY_OVERRUN);
809 }
794 /* XXX should handle break (0x10) */ 810 /* XXX should handle break (0x10) */
811 if (stat & RXERROR_PARITY)
812 flag = TTY_PARITY;
813 else if (stat & RXERROR_FRAMING)
814 flag = TTY_FRAME;
815
795 tty_insert_flip_char(&port->port, 816 tty_insert_flip_char(&port->port,
796 data[i+1], flag); 817 data[i+1], flag);
797 } 818 }
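
The keyspan hunks above all apply the same reporting rule: an overrun means characters were lost between two good bytes, so it is surfaced as its own NUL byte flagged TTY_OVERRUN, while parity and framing errors describe the received byte itself and are reported as a single, mutually exclusive flag on that byte (parity taking precedence). A minimal sketch of that rule, assuming the driver's RXERROR_* status bits from keyspan.h:

#include <linux/tty.h>
#include <linux/tty_flip.h>

/* Illustrative only; RXERROR_* are the driver's status-byte encoding. */
static void keyspan_report_byte(struct tty_port *port, u8 stat, u8 ch)
{
        char flag = TTY_NORMAL;

        if (stat & RXERROR_OVERRUN)             /* lost data: its own marker byte */
                tty_insert_flip_char(port, 0, TTY_OVERRUN);

        if (stat & RXERROR_PARITY)              /* byte-level errors: one flag */
                flag = TTY_PARITY;
        else if (stat & RXERROR_FRAMING)
                flag = TTY_FRAME;

        tty_insert_flip_char(port, ch, flag);
}
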
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
index a7fe664b6b7d..70a098de429f 100644
--- a/drivers/usb/serial/ssu100.c
+++ b/drivers/usb/serial/ssu100.c
@@ -490,10 +490,9 @@ static void ssu100_update_lsr(struct usb_serial_port *port, u8 lsr,
490 if (*tty_flag == TTY_NORMAL) 490 if (*tty_flag == TTY_NORMAL)
491 *tty_flag = TTY_FRAME; 491 *tty_flag = TTY_FRAME;
492 } 492 }
493 if (lsr & UART_LSR_OE){ 493 if (lsr & UART_LSR_OE) {
494 port->icount.overrun++; 494 port->icount.overrun++;
495 if (*tty_flag == TTY_NORMAL) 495 tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
496 *tty_flag = TTY_OVERRUN;
497 } 496 }
498 } 497 }
499 498
@@ -511,12 +510,8 @@ static void ssu100_process_read_urb(struct urb *urb)
511 if ((len >= 4) && 510 if ((len >= 4) &&
512 (packet[0] == 0x1b) && (packet[1] == 0x1b) && 511 (packet[0] == 0x1b) && (packet[1] == 0x1b) &&
513 ((packet[2] == 0x00) || (packet[2] == 0x01))) { 512 ((packet[2] == 0x00) || (packet[2] == 0x01))) {
514 if (packet[2] == 0x00) { 513 if (packet[2] == 0x00)
515 ssu100_update_lsr(port, packet[3], &flag); 514 ssu100_update_lsr(port, packet[3], &flag);
516 if (flag == TTY_OVERRUN)
517 tty_insert_flip_char(&port->port, 0,
518 TTY_OVERRUN);
519 }
520 if (packet[2] == 0x01) 515 if (packet[2] == 0x01)
521 ssu100_update_msr(port, packet[3]); 516 ssu100_update_msr(port, packet[3]);
522 517
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 2fefaf923e4a..18a283d6de1c 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -103,3 +103,10 @@ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
103 "VL711", 103 "VL711",
104 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 104 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
105 US_FL_NO_ATA_1X), 105 US_FL_NO_ATA_1X),
106
107/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
108UNUSUAL_DEV(0x4971, 0x1012, 0x0000, 0x9999,
109 "Hitachi",
110 "External HDD",
111 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
112 US_FL_IGNORE_UAS),
diff --git a/fs/aio.c b/fs/aio.c
index 84a751005f5b..14b93159ef83 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -165,6 +165,15 @@ static struct vfsmount *aio_mnt;
165static const struct file_operations aio_ring_fops; 165static const struct file_operations aio_ring_fops;
166static const struct address_space_operations aio_ctx_aops; 166static const struct address_space_operations aio_ctx_aops;
167 167
168/* Backing dev info for aio fs.
169 * -no dirty page accounting or writeback happens
170 */
171static struct backing_dev_info aio_fs_backing_dev_info = {
172 .name = "aiofs",
173 .state = 0,
174 .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_MAP_COPY,
175};
176
168static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages) 177static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
169{ 178{
170 struct qstr this = QSTR_INIT("[aio]", 5); 179 struct qstr this = QSTR_INIT("[aio]", 5);
@@ -176,6 +185,7 @@ static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
176 185
177 inode->i_mapping->a_ops = &aio_ctx_aops; 186 inode->i_mapping->a_ops = &aio_ctx_aops;
178 inode->i_mapping->private_data = ctx; 187 inode->i_mapping->private_data = ctx;
188 inode->i_mapping->backing_dev_info = &aio_fs_backing_dev_info;
179 inode->i_size = PAGE_SIZE * nr_pages; 189 inode->i_size = PAGE_SIZE * nr_pages;
180 190
181 path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this); 191 path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this);
@@ -220,6 +230,9 @@ static int __init aio_setup(void)
220 if (IS_ERR(aio_mnt)) 230 if (IS_ERR(aio_mnt))
221 panic("Failed to create aio fs mount."); 231 panic("Failed to create aio fs mount.");
222 232
233 if (bdi_init(&aio_fs_backing_dev_info))
234 panic("Failed to init aio fs backing dev info.");
235
223 kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC); 236 kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
224 kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC); 237 kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
225 238
@@ -281,11 +294,6 @@ static const struct file_operations aio_ring_fops = {
281 .mmap = aio_ring_mmap, 294 .mmap = aio_ring_mmap,
282}; 295};
283 296
284static int aio_set_page_dirty(struct page *page)
285{
286 return 0;
287}
288
289#if IS_ENABLED(CONFIG_MIGRATION) 297#if IS_ENABLED(CONFIG_MIGRATION)
290static int aio_migratepage(struct address_space *mapping, struct page *new, 298static int aio_migratepage(struct address_space *mapping, struct page *new,
291 struct page *old, enum migrate_mode mode) 299 struct page *old, enum migrate_mode mode)
@@ -357,7 +365,7 @@ out:
357#endif 365#endif
358 366
359static const struct address_space_operations aio_ctx_aops = { 367static const struct address_space_operations aio_ctx_aops = {
360 .set_page_dirty = aio_set_page_dirty, 368 .set_page_dirty = __set_page_dirty_no_writeback,
361#if IS_ENABLED(CONFIG_MIGRATION) 369#if IS_ENABLED(CONFIG_MIGRATION)
362 .migratepage = aio_migratepage, 370 .migratepage = aio_migratepage,
363#endif 371#endif
@@ -412,7 +420,6 @@ static int aio_setup_ring(struct kioctx *ctx)
412 pr_debug("pid(%d) page[%d]->count=%d\n", 420 pr_debug("pid(%d) page[%d]->count=%d\n",
413 current->pid, i, page_count(page)); 421 current->pid, i, page_count(page));
414 SetPageUptodate(page); 422 SetPageUptodate(page);
415 SetPageDirty(page);
416 unlock_page(page); 423 unlock_page(page);
417 424
418 ctx->ring_pages[i] = page; 425 ctx->ring_pages[i] = page;
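
The aio change replaces the private no-op set_page_dirty with the stock __set_page_dirty_no_writeback and gives the ring mapping a backing_dev_info that opts out of dirty accounting and writeback, so the ring pages can be dirtied without ever being handed to the flusher. A minimal sketch of that pattern against the 3.18-era API (i_mapping->backing_dev_info was removed in later kernels); the "examplefs" names are hypothetical:

#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/mm.h>

static struct backing_dev_info example_bdi = {
        .name           = "examplefs",
        .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_MAP_COPY,
};

static const struct address_space_operations example_aops = {
        /* pages may be dirtied, but are never accounted or written back */
        .set_page_dirty = __set_page_dirty_no_writeback,
};

static int __init example_init(void)
{
        return bdi_init(&example_bdi);          /* register the BDI once */
}

static void example_attach(struct inode *inode)
{
        inode->i_mapping->a_ops = &example_aops;
        inode->i_mapping->backing_dev_info = &example_bdi;
}
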
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index d3220d31d3cb..dcd9be32ac57 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -1011,8 +1011,6 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
1011 bytes = min(bytes, working_bytes); 1011 bytes = min(bytes, working_bytes);
1012 kaddr = kmap_atomic(page_out); 1012 kaddr = kmap_atomic(page_out);
1013 memcpy(kaddr + *pg_offset, buf + buf_offset, bytes); 1013 memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
1014 if (*pg_index == (vcnt - 1) && *pg_offset == 0)
1015 memset(kaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
1016 kunmap_atomic(kaddr); 1014 kunmap_atomic(kaddr);
1017 flush_dcache_page(page_out); 1015 flush_dcache_page(page_out);
1018 1016
@@ -1054,3 +1052,34 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
1054 1052
1055 return 1; 1053 return 1;
1056} 1054}
1055
1056/*
1057 * When uncompressing data, we need to make sure and zero any parts of
1058 * the biovec that were not filled in by the decompression code. pg_index
1059 * and pg_offset indicate the last page and the last offset of that page
1060 * that have been filled in. This will zero everything remaining in the
1061 * biovec.
1062 */
1063void btrfs_clear_biovec_end(struct bio_vec *bvec, int vcnt,
1064 unsigned long pg_index,
1065 unsigned long pg_offset)
1066{
1067 while (pg_index < vcnt) {
1068 struct page *page = bvec[pg_index].bv_page;
1069 unsigned long off = bvec[pg_index].bv_offset;
1070 unsigned long len = bvec[pg_index].bv_len;
1071
1072 if (pg_offset < off)
1073 pg_offset = off;
1074 if (pg_offset < off + len) {
1075 unsigned long bytes = off + len - pg_offset;
1076 char *kaddr;
1077
1078 kaddr = kmap_atomic(page);
1079 memset(kaddr + pg_offset, 0, bytes);
1080 kunmap_atomic(kaddr);
1081 }
1082 pg_index++;
1083 pg_offset = 0;
1084 }
1085}
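
The new helper walks the biovec from the last position that decompression actually filled to the end of the vector and zeroes everything beyond it, so stale page contents can never reach the reader. A standalone model of the same walk (plain C, illustration only; struct buf_vec stands in for a mapped struct bio_vec):

#include <string.h>

struct buf_vec {
        unsigned char   *data;  /* stands in for the mapped bv_page */
        unsigned long   off;    /* bv_offset */
        unsigned long   len;    /* bv_len */
};

static void clear_vec_end(struct buf_vec *vec, int vcnt,
                          unsigned long pg_index, unsigned long pg_offset)
{
        while (pg_index < (unsigned long)vcnt) {
                unsigned long off = vec[pg_index].off;
                unsigned long len = vec[pg_index].len;

                if (pg_offset < off)
                        pg_offset = off;
                if (pg_offset < off + len)
                        memset(vec[pg_index].data + pg_offset, 0,
                               off + len - pg_offset);

                pg_index++;
                pg_offset = 0;  /* every later segment is cleared in full */
        }
}
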
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index 0c803b4fbf93..d181f70caae0 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -45,7 +45,9 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
45 unsigned long nr_pages); 45 unsigned long nr_pages);
46int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, 46int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
47 int mirror_num, unsigned long bio_flags); 47 int mirror_num, unsigned long bio_flags);
48 48void btrfs_clear_biovec_end(struct bio_vec *bvec, int vcnt,
49 unsigned long pg_index,
50 unsigned long pg_offset);
49struct btrfs_compress_op { 51struct btrfs_compress_op {
50 struct list_head *(*alloc_workspace)(void); 52 struct list_head *(*alloc_workspace)(void);
51 53
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index 78285f30909e..617553cdb7d3 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -373,6 +373,8 @@ cont:
373 } 373 }
374done: 374done:
375 kunmap(pages_in[page_in_index]); 375 kunmap(pages_in[page_in_index]);
376 if (!ret)
377 btrfs_clear_biovec_end(bvec, vcnt, page_out_index, pg_offset);
376 return ret; 378 return ret;
377} 379}
378 380
@@ -410,10 +412,23 @@ static int lzo_decompress(struct list_head *ws, unsigned char *data_in,
410 goto out; 412 goto out;
411 } 413 }
412 414
415 /*
416 * the caller is already checking against PAGE_SIZE, but lets
417 * move this check closer to the memcpy/memset
418 */
419 destlen = min_t(unsigned long, destlen, PAGE_SIZE);
413 bytes = min_t(unsigned long, destlen, out_len - start_byte); 420 bytes = min_t(unsigned long, destlen, out_len - start_byte);
414 421
415 kaddr = kmap_atomic(dest_page); 422 kaddr = kmap_atomic(dest_page);
416 memcpy(kaddr, workspace->buf + start_byte, bytes); 423 memcpy(kaddr, workspace->buf + start_byte, bytes);
424
425 /*
426 * btrfs_getblock is doing a zero on the tail of the page too,
427 * but this will cover anything missing from the decompressed
428 * data.
429 */
430 if (bytes < destlen)
431 memset(kaddr+bytes, 0, destlen-bytes);
417 kunmap_atomic(kaddr); 432 kunmap_atomic(kaddr);
418out: 433out:
419 return ret; 434 return ret;
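
The single-page (inline extent) decompress path now follows the same defensive rule in both lzo and zlib: clamp the output length to one page before any copy, then zero whatever the decompressor did not produce, so the destination page is always fully defined. A condensed sketch with hypothetical buffer names:

#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>

static void ex_copy_inline_result(struct page *dest_page, const char *buf,
                                  unsigned long out_len,
                                  unsigned long start_byte,
                                  unsigned long destlen)
{
        unsigned long bytes;
        char *kaddr;

        destlen = min_t(unsigned long, destlen, PAGE_SIZE);
        bytes = min_t(unsigned long, destlen, out_len - start_byte);

        kaddr = kmap_atomic(dest_page);
        memcpy(kaddr, buf + start_byte, bytes);
        if (bytes < destlen)
                memset(kaddr + bytes, 0, destlen - bytes);      /* defined tail */
        kunmap_atomic(kaddr);
}
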
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index 759fa4e2de8f..fb22fd8d8fb8 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -299,6 +299,8 @@ done:
299 zlib_inflateEnd(&workspace->strm); 299 zlib_inflateEnd(&workspace->strm);
300 if (data_in) 300 if (data_in)
301 kunmap(pages_in[page_in_index]); 301 kunmap(pages_in[page_in_index]);
302 if (!ret)
303 btrfs_clear_biovec_end(bvec, vcnt, page_out_index, pg_offset);
302 return ret; 304 return ret;
303} 305}
304 306
@@ -310,10 +312,14 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
310 struct workspace *workspace = list_entry(ws, struct workspace, list); 312 struct workspace *workspace = list_entry(ws, struct workspace, list);
311 int ret = 0; 313 int ret = 0;
312 int wbits = MAX_WBITS; 314 int wbits = MAX_WBITS;
313 unsigned long bytes_left = destlen; 315 unsigned long bytes_left;
314 unsigned long total_out = 0; 316 unsigned long total_out = 0;
317 unsigned long pg_offset = 0;
315 char *kaddr; 318 char *kaddr;
316 319
320 destlen = min_t(unsigned long, destlen, PAGE_SIZE);
321 bytes_left = destlen;
322
317 workspace->strm.next_in = data_in; 323 workspace->strm.next_in = data_in;
318 workspace->strm.avail_in = srclen; 324 workspace->strm.avail_in = srclen;
319 workspace->strm.total_in = 0; 325 workspace->strm.total_in = 0;
@@ -341,7 +347,6 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
341 unsigned long buf_start; 347 unsigned long buf_start;
342 unsigned long buf_offset; 348 unsigned long buf_offset;
343 unsigned long bytes; 349 unsigned long bytes;
344 unsigned long pg_offset = 0;
345 350
346 ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH); 351 ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
347 if (ret != Z_OK && ret != Z_STREAM_END) 352 if (ret != Z_OK && ret != Z_STREAM_END)
@@ -384,6 +389,17 @@ next:
384 ret = 0; 389 ret = 0;
385 390
386 zlib_inflateEnd(&workspace->strm); 391 zlib_inflateEnd(&workspace->strm);
392
393 /*
394 * this should only happen if zlib returned fewer bytes than we
395 * expected. btrfs_get_block is responsible for zeroing from the
396 * end of the inline extent (destlen) to the end of the page
397 */
398 if (pg_offset < destlen) {
399 kaddr = kmap_atomic(dest_page);
400 memset(kaddr + pg_offset, 0, destlen - pg_offset);
401 kunmap_atomic(kaddr);
402 }
387 return ret; 403 return ret;
388} 404}
389 405
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index ed2b1151b171..7cbdf1b2e4ab 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -774,8 +774,12 @@ static bool nfsd41_cb_get_slot(struct nfs4_client *clp, struct rpc_task *task)
774{ 774{
775 if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) { 775 if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
776 rpc_sleep_on(&clp->cl_cb_waitq, task, NULL); 776 rpc_sleep_on(&clp->cl_cb_waitq, task, NULL);
777 dprintk("%s slot is busy\n", __func__); 777 /* Race breaker */
778 return false; 778 if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
779 dprintk("%s slot is busy\n", __func__);
780 return false;
781 }
782 rpc_wake_up_queued_task(&clp->cl_cb_waitq, task);
779 } 783 }
780 return true; 784 return true;
781} 785}
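
The callback-slot fix is a classic race breaker: after queueing itself to sleep, the task re-tests the busy bit, and if the slot was freed in the window between the first test and the sleep it wakes itself back up instead of stranding on the waitqueue. The shape of the pattern, with hypothetical names:

#include <linux/bitops.h>
#include <linux/sunrpc/sched.h>

static bool try_get_slot(unsigned long *busy, struct rpc_wait_queue *waitq,
                         struct rpc_task *task)
{
        if (test_and_set_bit(0, busy) != 0) {
                rpc_sleep_on(waitq, task, NULL);
                /* Race breaker: the owner may have released the slot already. */
                if (test_and_set_bit(0, busy) != 0)
                        return false;           /* still busy; stay asleep */
                rpc_wake_up_queued_task(waitq, task);
        }
        return true;                            /* slot is ours */
}
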
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index 747f3b95bd11..33a46a8dfaf7 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -335,12 +335,15 @@ void nfsd_lockd_shutdown(void);
335 (NFSD4_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SUPPATTR_EXCLCREAT) 335 (NFSD4_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SUPPATTR_EXCLCREAT)
336 336
337#ifdef CONFIG_NFSD_V4_SECURITY_LABEL 337#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
338#define NFSD4_2_SUPPORTED_ATTRS_WORD2 \ 338#define NFSD4_2_SECURITY_ATTRS FATTR4_WORD2_SECURITY_LABEL
339 (NFSD4_1_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SECURITY_LABEL)
340#else 339#else
341#define NFSD4_2_SUPPORTED_ATTRS_WORD2 0 340#define NFSD4_2_SECURITY_ATTRS 0
342#endif 341#endif
343 342
343#define NFSD4_2_SUPPORTED_ATTRS_WORD2 \
344 (NFSD4_1_SUPPORTED_ATTRS_WORD2 | \
345 NFSD4_2_SECURITY_ATTRS)
346
344static inline u32 nfsd_suppattrs0(u32 minorversion) 347static inline u32 nfsd_suppattrs0(u32 minorversion)
345{ 348{
346 return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD0 349 return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD0
diff --git a/include/dt-bindings/clock/qcom,mmcc-apq8084.h b/include/dt-bindings/clock/qcom,mmcc-apq8084.h
index a929f86d0ddd..d72b5b35f15e 100644
--- a/include/dt-bindings/clock/qcom,mmcc-apq8084.h
+++ b/include/dt-bindings/clock/qcom,mmcc-apq8084.h
@@ -60,7 +60,7 @@
60#define ESC1_CLK_SRC 43 60#define ESC1_CLK_SRC 43
61#define HDMI_CLK_SRC 44 61#define HDMI_CLK_SRC 44
62#define VSYNC_CLK_SRC 45 62#define VSYNC_CLK_SRC 45
63#define RBCPR_CLK_SRC 46 63#define MMSS_RBCPR_CLK_SRC 46
64#define RBBMTIMER_CLK_SRC 47 64#define RBBMTIMER_CLK_SRC 47
65#define MAPLE_CLK_SRC 48 65#define MAPLE_CLK_SRC 48
66#define VDP_CLK_SRC 49 66#define VDP_CLK_SRC 49
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index be21af149f11..2839c639f092 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -352,7 +352,6 @@ struct clk_divider {
352#define CLK_DIVIDER_READ_ONLY BIT(5) 352#define CLK_DIVIDER_READ_ONLY BIT(5)
353 353
354extern const struct clk_ops clk_divider_ops; 354extern const struct clk_ops clk_divider_ops;
355extern const struct clk_ops clk_divider_ro_ops;
356struct clk *clk_register_divider(struct device *dev, const char *name, 355struct clk *clk_register_divider(struct device *dev, const char *name,
357 const char *parent_name, unsigned long flags, 356 const char *parent_name, unsigned long flags,
358 void __iomem *reg, u8 shift, u8 width, 357 void __iomem *reg, u8 shift, u8 width,
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 503b085b7832..4d078cebafd2 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -217,26 +217,26 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
217 217
218 218
219struct cpufreq_driver { 219struct cpufreq_driver {
220 char name[CPUFREQ_NAME_LEN]; 220 char name[CPUFREQ_NAME_LEN];
221 u8 flags; 221 u8 flags;
222 void *driver_data; 222 void *driver_data;
223 223
224 /* needed by all drivers */ 224 /* needed by all drivers */
225 int (*init) (struct cpufreq_policy *policy); 225 int (*init)(struct cpufreq_policy *policy);
226 int (*verify) (struct cpufreq_policy *policy); 226 int (*verify)(struct cpufreq_policy *policy);
227 227
228 /* define one out of two */ 228 /* define one out of two */
229 int (*setpolicy) (struct cpufreq_policy *policy); 229 int (*setpolicy)(struct cpufreq_policy *policy);
230 230
231 /* 231 /*
232 * On failure, should always restore frequency to policy->restore_freq 232 * On failure, should always restore frequency to policy->restore_freq
233 * (i.e. old freq). 233 * (i.e. old freq).
234 */ 234 */
235 int (*target) (struct cpufreq_policy *policy, /* Deprecated */ 235 int (*target)(struct cpufreq_policy *policy,
236 unsigned int target_freq, 236 unsigned int target_freq,
237 unsigned int relation); 237 unsigned int relation); /* Deprecated */
238 int (*target_index) (struct cpufreq_policy *policy, 238 int (*target_index)(struct cpufreq_policy *policy,
239 unsigned int index); 239 unsigned int index);
240 /* 240 /*
241 * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION 241 * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION
242 * unset. 242 * unset.
@@ -252,27 +252,31 @@ struct cpufreq_driver {
252 * wish to switch to intermediate frequency for some target frequency. 252 * wish to switch to intermediate frequency for some target frequency.
253 * In that case core will directly call ->target_index(). 253 * In that case core will directly call ->target_index().
254 */ 254 */
255 unsigned int (*get_intermediate)(struct cpufreq_policy *policy, 255 unsigned int (*get_intermediate)(struct cpufreq_policy *policy,
256 unsigned int index); 256 unsigned int index);
257 int (*target_intermediate)(struct cpufreq_policy *policy, 257 int (*target_intermediate)(struct cpufreq_policy *policy,
258 unsigned int index); 258 unsigned int index);
259 259
260 /* should be defined, if possible */ 260 /* should be defined, if possible */
261 unsigned int (*get) (unsigned int cpu); 261 unsigned int (*get)(unsigned int cpu);
262 262
263 /* optional */ 263 /* optional */
264 int (*bios_limit) (int cpu, unsigned int *limit); 264 int (*bios_limit)(int cpu, unsigned int *limit);
265
266 int (*exit)(struct cpufreq_policy *policy);
267 void (*stop_cpu)(struct cpufreq_policy *policy);
268 int (*suspend)(struct cpufreq_policy *policy);
269 int (*resume)(struct cpufreq_policy *policy);
270
271 /* Will be called after the driver is fully initialized */
272 void (*ready)(struct cpufreq_policy *policy);
265 273
266 int (*exit) (struct cpufreq_policy *policy); 274 struct freq_attr **attr;
267 void (*stop_cpu) (struct cpufreq_policy *policy);
268 int (*suspend) (struct cpufreq_policy *policy);
269 int (*resume) (struct cpufreq_policy *policy);
270 struct freq_attr **attr;
271 275
272 /* platform specific boost support code */ 276 /* platform specific boost support code */
273 bool boost_supported; 277 bool boost_supported;
274 bool boost_enabled; 278 bool boost_enabled;
275 int (*set_boost) (int state); 279 int (*set_boost)(int state);
276}; 280};
277 281
278/* flags */ 282/* flags */
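
Besides the callback-pointer whitespace cleanup, the cpufreq_driver hunk adds an optional ->ready() hook that the core calls once the policy is fully initialized, so drivers can defer work (enabling boost, registering a cooling device, and the like) until it is safe. A minimal sketch of a driver filling it in; all example_* names are hypothetical stubs:

#include <linux/cpufreq.h>
#include <linux/printk.h>

/* Hypothetical stubs; a real driver implements these properly. */
static int example_init(struct cpufreq_policy *policy) { return 0; }
static int example_verify(struct cpufreq_policy *policy) { return 0; }
static int example_target_index(struct cpufreq_policy *policy,
                                unsigned int index) { return 0; }
static unsigned int example_get(unsigned int cpu) { return 0; }

static void example_ready(struct cpufreq_policy *policy)
{
        /* Called by the core after this policy is fully set up. */
        pr_info("example-cpufreq: CPU %u policy ready\n", policy->cpu);
}

static struct cpufreq_driver example_driver = {
        .name           = "example",
        .init           = example_init,
        .verify         = example_verify,
        .target_index   = example_target_index,
        .get            = example_get,
        .ready          = example_ready,        /* new optional callback */
};
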
diff --git a/include/linux/iio/events.h b/include/linux/iio/events.h
index 8bbd7bc1043d..03fa332ad2a8 100644
--- a/include/linux/iio/events.h
+++ b/include/linux/iio/events.h
@@ -72,7 +72,7 @@ struct iio_event_data {
72 72
73#define IIO_EVENT_CODE_EXTRACT_TYPE(mask) ((mask >> 56) & 0xFF) 73#define IIO_EVENT_CODE_EXTRACT_TYPE(mask) ((mask >> 56) & 0xFF)
74 74
75#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0xCF) 75#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0x7F)
76 76
77#define IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(mask) ((mask >> 32) & 0xFF) 77#define IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(mask) ((mask >> 32) & 0xFF)
78 78
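
The one-bit mask change matters because, judging from the new macro, the direction field is the contiguous low 7 bits at shift 48: the old mask 0xCF (binary 1100 1111) silently dropped bits 4 and 5 of that field while letting stray bit 7 through, whereas 0x7F keeps the whole field. A tiny worked check of just that arithmetic, assuming nothing beyond the shift and width visible here:

#include <stdint.h>
#include <stdio.h>

#define EXTRACT_DIR_OLD(mask) (((mask) >> 48) & 0xCF)   /* loses bits 4-5 */
#define EXTRACT_DIR_NEW(mask) (((mask) >> 48) & 0x7F)   /* full 7-bit field */

int main(void)
{
        uint64_t code = (uint64_t)0x30 << 48;   /* a value using bits 4-5 */

        /* prints: old: 0x0 new: 0x30 */
        printf("old: 0x%llx new: 0x%llx\n",
               (unsigned long long)EXTRACT_DIR_OLD(code),
               (unsigned long long)EXTRACT_DIR_NEW(code));
        return 0;
}
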
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index ea53b04993f2..a6059bdf7b03 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -703,7 +703,7 @@ void kvm_arch_sync_events(struct kvm *kvm);
703int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); 703int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
704void kvm_vcpu_kick(struct kvm_vcpu *vcpu); 704void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
705 705
706bool kvm_is_mmio_pfn(pfn_t pfn); 706bool kvm_is_reserved_pfn(pfn_t pfn);
707 707
708struct kvm_irq_ack_notifier { 708struct kvm_irq_ack_notifier {
709 struct hlist_node link; 709 struct hlist_node link;
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 5be8db45e368..4c8ac5fcc224 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -331,6 +331,7 @@ struct pci_dev {
331 unsigned int is_added:1; 331 unsigned int is_added:1;
332 unsigned int is_busmaster:1; /* device is busmaster */ 332 unsigned int is_busmaster:1; /* device is busmaster */
333 unsigned int no_msi:1; /* device may not use msi */ 333 unsigned int no_msi:1; /* device may not use msi */
334 unsigned int no_64bit_msi:1; /* device may only use 32-bit MSIs */
334 unsigned int block_cfg_access:1; /* config space access is blocked */ 335 unsigned int block_cfg_access:1; /* config space access is blocked */
335 unsigned int broken_parity_status:1; /* Device generates false positive parity */ 336 unsigned int broken_parity_status:1; /* Device generates false positive parity */
336 unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */ 337 unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index fe7994c48b75..b2828a06a5a6 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -37,6 +37,8 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
37int inet_ctl_sock_create(struct sock **sk, unsigned short family, 37int inet_ctl_sock_create(struct sock **sk, unsigned short family,
38 unsigned short type, unsigned char protocol, 38 unsigned short type, unsigned char protocol,
39 struct net *net); 39 struct net *net);
40int inet_recv_error(struct sock *sk, struct msghdr *msg, int len,
41 int *addr_len);
40 42
41static inline void inet_ctl_sock_destroy(struct sock *sk) 43static inline void inet_ctl_sock_destroy(struct sock *sk)
42{ 44{
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index e862497f7556..8bb00a27e219 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -184,6 +184,8 @@ struct snd_pcm_ops {
184#define SNDRV_PCM_FMTBIT_DSD_U8 _SNDRV_PCM_FMTBIT(DSD_U8) 184#define SNDRV_PCM_FMTBIT_DSD_U8 _SNDRV_PCM_FMTBIT(DSD_U8)
185#define SNDRV_PCM_FMTBIT_DSD_U16_LE _SNDRV_PCM_FMTBIT(DSD_U16_LE) 185#define SNDRV_PCM_FMTBIT_DSD_U16_LE _SNDRV_PCM_FMTBIT(DSD_U16_LE)
186#define SNDRV_PCM_FMTBIT_DSD_U32_LE _SNDRV_PCM_FMTBIT(DSD_U32_LE) 186#define SNDRV_PCM_FMTBIT_DSD_U32_LE _SNDRV_PCM_FMTBIT(DSD_U32_LE)
187#define SNDRV_PCM_FMTBIT_DSD_U16_BE _SNDRV_PCM_FMTBIT(DSD_U16_BE)
188#define SNDRV_PCM_FMTBIT_DSD_U32_BE _SNDRV_PCM_FMTBIT(DSD_U32_BE)
187 189
188#ifdef SNDRV_LITTLE_ENDIAN 190#ifdef SNDRV_LITTLE_ENDIAN
189#define SNDRV_PCM_FMTBIT_S16 SNDRV_PCM_FMTBIT_S16_LE 191#define SNDRV_PCM_FMTBIT_S16 SNDRV_PCM_FMTBIT_S16_LE
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index 6ee586728df9..941d32f007dc 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -220,7 +220,9 @@ typedef int __bitwise snd_pcm_format_t;
220#define SNDRV_PCM_FORMAT_DSD_U8 ((__force snd_pcm_format_t) 48) /* DSD, 1-byte samples DSD (x8) */ 220#define SNDRV_PCM_FORMAT_DSD_U8 ((__force snd_pcm_format_t) 48) /* DSD, 1-byte samples DSD (x8) */
221#define SNDRV_PCM_FORMAT_DSD_U16_LE ((__force snd_pcm_format_t) 49) /* DSD, 2-byte samples DSD (x16), little endian */ 221#define SNDRV_PCM_FORMAT_DSD_U16_LE ((__force snd_pcm_format_t) 49) /* DSD, 2-byte samples DSD (x16), little endian */
222#define SNDRV_PCM_FORMAT_DSD_U32_LE ((__force snd_pcm_format_t) 50) /* DSD, 4-byte samples DSD (x32), little endian */ 222#define SNDRV_PCM_FORMAT_DSD_U32_LE ((__force snd_pcm_format_t) 50) /* DSD, 4-byte samples DSD (x32), little endian */
223#define SNDRV_PCM_FORMAT_LAST SNDRV_PCM_FORMAT_DSD_U32_LE 223#define SNDRV_PCM_FORMAT_DSD_U16_BE ((__force snd_pcm_format_t) 51) /* DSD, 2-byte samples DSD (x16), big endian */
224#define SNDRV_PCM_FORMAT_DSD_U32_BE ((__force snd_pcm_format_t) 52) /* DSD, 4-byte samples DSD (x32), big endian */
225#define SNDRV_PCM_FORMAT_LAST SNDRV_PCM_FORMAT_DSD_U32_BE
224 226
225#ifdef SNDRV_LITTLE_ENDIAN 227#ifdef SNDRV_LITTLE_ENDIAN
226#define SNDRV_PCM_FORMAT_S16 SNDRV_PCM_FORMAT_S16_LE 228#define SNDRV_PCM_FORMAT_S16 SNDRV_PCM_FORMAT_S16_LE
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 2ff9706647f2..e5ec470b851f 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -280,6 +280,7 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
280 [IFLA_BRPORT_MODE] = { .type = NLA_U8 }, 280 [IFLA_BRPORT_MODE] = { .type = NLA_U8 },
281 [IFLA_BRPORT_GUARD] = { .type = NLA_U8 }, 281 [IFLA_BRPORT_GUARD] = { .type = NLA_U8 },
282 [IFLA_BRPORT_PROTECT] = { .type = NLA_U8 }, 282 [IFLA_BRPORT_PROTECT] = { .type = NLA_U8 },
283 [IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 },
283 [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 }, 284 [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 },
284 [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 }, 285 [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
285}; 286};
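
Adding IFLA_BRPORT_FAST_LEAVE to br_port_policy means the attribute now goes through the netlink policy machinery, which checks its type and length as an NLA_U8 before the bridge code reads it. The policy-table idiom, shown with hypothetical attribute names:

#include <net/netlink.h>

enum {
        EX_ATTR_UNSPEC,
        EX_ATTR_MODE,           /* u8 */
        EX_ATTR_FAST_LEAVE,     /* u8: every attribute read needs an entry */
        __EX_ATTR_MAX,
};
#define EX_ATTR_MAX (__EX_ATTR_MAX - 1)

static const struct nla_policy ex_policy[EX_ATTR_MAX + 1] = {
        [EX_ATTR_MODE]          = { .type = NLA_U8 },
        [EX_ATTR_FAST_LEAVE]    = { .type = NLA_U8 },
};
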
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index a6882686ca3a..b9b7dfaf202b 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2685,13 +2685,20 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
2685 int idx = 0; 2685 int idx = 0;
2686 u32 portid = NETLINK_CB(cb->skb).portid; 2686 u32 portid = NETLINK_CB(cb->skb).portid;
2687 u32 seq = cb->nlh->nlmsg_seq; 2687 u32 seq = cb->nlh->nlmsg_seq;
2688 struct nlattr *extfilt;
2689 u32 filter_mask = 0; 2688 u32 filter_mask = 0;
2690 2689
2691 extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg), 2690 if (nlmsg_len(cb->nlh) > sizeof(struct ifinfomsg)) {
2692 IFLA_EXT_MASK); 2691 struct nlattr *extfilt;
2693 if (extfilt) 2692
2694 filter_mask = nla_get_u32(extfilt); 2693 extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
2694 IFLA_EXT_MASK);
2695 if (extfilt) {
2696 if (nla_len(extfilt) < sizeof(filter_mask))
2697 return -EINVAL;
2698
2699 filter_mask = nla_get_u32(extfilt);
2700 }
2701 }
2695 2702
2696 rcu_read_lock(); 2703 rcu_read_lock();
2697 for_each_netdev_rcu(net, dev) { 2704 for_each_netdev_rcu(net, dev) {
@@ -2798,6 +2805,9 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
2798 if (br_spec) { 2805 if (br_spec) {
2799 nla_for_each_nested(attr, br_spec, rem) { 2806 nla_for_each_nested(attr, br_spec, rem) {
2800 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) { 2807 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
2808 if (nla_len(attr) < sizeof(flags))
2809 return -EINVAL;
2810
2801 have_flags = true; 2811 have_flags = true;
2802 flags = nla_get_u16(attr); 2812 flags = nla_get_u16(attr);
2803 break; 2813 break;
@@ -2868,6 +2878,9 @@ static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
2868 if (br_spec) { 2878 if (br_spec) {
2869 nla_for_each_nested(attr, br_spec, rem) { 2879 nla_for_each_nested(attr, br_spec, rem) {
2870 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) { 2880 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
2881 if (nla_len(attr) < sizeof(flags))
2882 return -EINVAL;
2883
2871 have_flags = true; 2884 have_flags = true;
2872 flags = nla_get_u16(attr); 2885 flags = nla_get_u16(attr);
2873 break; 2886 break;
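
All three rtnetlink hunks close the same hole: an attribute's payload length was trusted before a fixed-size integer was read out of it, so a malformed (too short) IFLA_EXT_MASK or IFLA_BRIDGE_FLAGS could read past the attribute. The defensive pattern, sketched with the same nlmsg/nla helpers and a hypothetical wrapper:

#include <linux/rtnetlink.h>
#include <net/netlink.h>

/* Returns 0 and fills *mask, or -EINVAL if the attribute is malformed. */
static int ex_get_filter_mask(const struct nlmsghdr *nlh, u32 *mask)
{
        struct nlattr *attr;

        *mask = 0;
        if (nlmsg_len(nlh) <= (int)sizeof(struct ifinfomsg))
                return 0;                       /* no attributes present */

        attr = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_EXT_MASK);
        if (!attr)
                return 0;
        if (nla_len(attr) < (int)sizeof(u32))   /* validate before nla_get_u32() */
                return -EINVAL;

        *mask = nla_get_u32(attr);
        return 0;
}
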
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 8b7fe5b03906..e67da4e6c324 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1386,6 +1386,17 @@ out:
1386 return pp; 1386 return pp;
1387} 1387}
1388 1388
1389int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
1390{
1391 if (sk->sk_family == AF_INET)
1392 return ip_recv_error(sk, msg, len, addr_len);
1393#if IS_ENABLED(CONFIG_IPV6)
1394 if (sk->sk_family == AF_INET6)
1395 return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
1396#endif
1397 return -EINVAL;
1398}
1399
1389static int inet_gro_complete(struct sk_buff *skb, int nhoff) 1400static int inet_gro_complete(struct sk_buff *skb, int nhoff)
1390{ 1401{
1391 __be16 newlen = htons(skb->len - nhoff); 1402 __be16 newlen = htons(skb->len - nhoff);
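
inet_recv_error() centralizes the family dispatch that ping previously open-coded and that tcp_recvmsg got wrong for IPv6 sockets by calling ip_recv_error() unconditionally. Callers now just forward MSG_ERRQUEUE requests to it; a sketch of such a call site, using the 3.18-era recvmsg signature with the kiocb argument (the surrounding handler is hypothetical):

#include <linux/socket.h>
#include <net/inet_common.h>
#include <net/sock.h>

static int ex_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                      size_t len, int noblock, int flags, int *addr_len)
{
        if (unlikely(flags & MSG_ERRQUEUE))
                return inet_recv_error(sk, msg, len, addr_len);

        /* normal receive path would follow here */
        return 0;
}
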
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 3e861011e4a3..1a7e979e80ba 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -528,6 +528,7 @@ static struct rtnl_link_ops vti_link_ops __read_mostly = {
528 .validate = vti_tunnel_validate, 528 .validate = vti_tunnel_validate,
529 .newlink = vti_newlink, 529 .newlink = vti_newlink,
530 .changelink = vti_changelink, 530 .changelink = vti_changelink,
531 .dellink = ip_tunnel_dellink,
531 .get_size = vti_get_size, 532 .get_size = vti_get_size,
532 .fill_info = vti_fill_info, 533 .fill_info = vti_fill_info,
533}; 534};
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 57f7c9804139..5d740cccf69e 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -217,6 +217,8 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
217 &ipv6_hdr(skb)->daddr)) 217 &ipv6_hdr(skb)->daddr))
218 continue; 218 continue;
219#endif 219#endif
220 } else {
221 continue;
220 } 222 }
221 223
222 if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) 224 if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
@@ -853,16 +855,8 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
853 if (flags & MSG_OOB) 855 if (flags & MSG_OOB)
854 goto out; 856 goto out;
855 857
856 if (flags & MSG_ERRQUEUE) { 858 if (flags & MSG_ERRQUEUE)
857 if (family == AF_INET) { 859 return inet_recv_error(sk, msg, len, addr_len);
858 return ip_recv_error(sk, msg, len, addr_len);
859#if IS_ENABLED(CONFIG_IPV6)
860 } else if (family == AF_INET6) {
861 return pingv6_ops.ipv6_recv_error(sk, msg, len,
862 addr_len);
863#endif
864 }
865 }
866 860
867 skb = skb_recv_datagram(sk, flags, noblock, &err); 861 skb = skb_recv_datagram(sk, flags, noblock, &err);
868 if (!skb) 862 if (!skb)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 39ec0c379545..38c2bcb8dd5d 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1598,7 +1598,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1598 u32 urg_hole = 0; 1598 u32 urg_hole = 0;
1599 1599
1600 if (unlikely(flags & MSG_ERRQUEUE)) 1600 if (unlikely(flags & MSG_ERRQUEUE))
1601 return ip_recv_error(sk, msg, len, addr_len); 1601 return inet_recv_error(sk, msg, len, addr_len);
1602 1602
1603 if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) && 1603 if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) &&
1604 (sk->sk_state == TCP_ESTABLISHED)) 1604 (sk->sk_state == TCP_ESTABLISHED))
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 9c7d7621466b..147be2024290 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -598,7 +598,10 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
598 if (th->rst) 598 if (th->rst)
599 return; 599 return;
600 600
601 if (skb_rtable(skb)->rt_type != RTN_LOCAL) 601 /* If sk not NULL, it means we did a successful lookup and incoming
602 * route had to be correct. prequeue might have dropped our dst.
603 */
604 if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
602 return; 605 return;
603 606
604 /* Swap the send and the receive. */ 607 /* Swap the send and the receive. */
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 4564e1fca3eb..0e32d2e1bdbf 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -502,11 +502,11 @@ static int ip6gre_rcv(struct sk_buff *skb)
502 502
503 skb->protocol = gre_proto; 503 skb->protocol = gre_proto;
504 /* WCCP version 1 and 2 protocol decoding. 504 /* WCCP version 1 and 2 protocol decoding.
505 * - Change protocol to IP 505 * - Change protocol to IPv6
506 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header 506 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
507 */ 507 */
508 if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) { 508 if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
509 skb->protocol = htons(ETH_P_IP); 509 skb->protocol = htons(ETH_P_IPV6);
510 if ((*(h + offset) & 0xF0) != 0x40) 510 if ((*(h + offset) & 0xF0) != 0x40)
511 offset += 4; 511 offset += 4;
512 } 512 }
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index a071563a7e6e..01e12d0d8fcc 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -69,7 +69,8 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
69 int nhoff; 69 int nhoff;
70 70
71 if (unlikely(skb_shinfo(skb)->gso_type & 71 if (unlikely(skb_shinfo(skb)->gso_type &
72 ~(SKB_GSO_UDP | 72 ~(SKB_GSO_TCPV4 |
73 SKB_GSO_UDP |
73 SKB_GSO_DODGY | 74 SKB_GSO_DODGY |
74 SKB_GSO_TCP_ECN | 75 SKB_GSO_TCP_ECN |
75 SKB_GSO_GRE | 76 SKB_GSO_GRE |
diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c
index b04ed72c4542..8db6c98fe218 100644
--- a/net/ipv6/ip6_udp_tunnel.c
+++ b/net/ipv6/ip6_udp_tunnel.c
@@ -79,15 +79,13 @@ int udp_tunnel6_xmit_skb(struct socket *sock, struct dst_entry *dst,
79 uh->source = src_port; 79 uh->source = src_port;
80 80
81 uh->len = htons(skb->len); 81 uh->len = htons(skb->len);
82 uh->check = 0;
83 82
84 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); 83 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
85 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED 84 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED
86 | IPSKB_REROUTED); 85 | IPSKB_REROUTED);
87 skb_dst_set(skb, dst); 86 skb_dst_set(skb, dst);
88 87
89 udp6_set_csum(udp_get_no_check6_tx(sk), skb, &inet6_sk(sk)->saddr, 88 udp6_set_csum(udp_get_no_check6_tx(sk), skb, saddr, daddr, skb->len);
90 &sk->sk_v6_daddr, skb->len);
91 89
92 __skb_push(skb, sizeof(*ip6h)); 90 __skb_push(skb, sizeof(*ip6h));
93 skb_reset_network_header(skb); 91 skb_reset_network_header(skb);
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 31089d153fd3..bcda14de7f84 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -905,6 +905,15 @@ static int vti6_newlink(struct net *src_net, struct net_device *dev,
905 return vti6_tnl_create2(dev); 905 return vti6_tnl_create2(dev);
906} 906}
907 907
908static void vti6_dellink(struct net_device *dev, struct list_head *head)
909{
910 struct net *net = dev_net(dev);
911 struct vti6_net *ip6n = net_generic(net, vti6_net_id);
912
913 if (dev != ip6n->fb_tnl_dev)
914 unregister_netdevice_queue(dev, head);
915}
916
908static int vti6_changelink(struct net_device *dev, struct nlattr *tb[], 917static int vti6_changelink(struct net_device *dev, struct nlattr *tb[],
909 struct nlattr *data[]) 918 struct nlattr *data[])
910{ 919{
@@ -980,6 +989,7 @@ static struct rtnl_link_ops vti6_link_ops __read_mostly = {
980 .setup = vti6_dev_setup, 989 .setup = vti6_dev_setup,
981 .validate = vti6_validate, 990 .validate = vti6_validate,
982 .newlink = vti6_newlink, 991 .newlink = vti6_newlink,
992 .dellink = vti6_dellink,
983 .changelink = vti6_changelink, 993 .changelink = vti6_changelink,
984 .get_size = vti6_get_size, 994 .get_size = vti6_get_size,
985 .fill_info = vti6_fill_info, 995 .fill_info = vti6_fill_info,
@@ -1020,6 +1030,7 @@ static int __net_init vti6_init_net(struct net *net)
1020 if (!ip6n->fb_tnl_dev) 1030 if (!ip6n->fb_tnl_dev)
1021 goto err_alloc_dev; 1031 goto err_alloc_dev;
1022 dev_net_set(ip6n->fb_tnl_dev, net); 1032 dev_net_set(ip6n->fb_tnl_dev, net);
1033 ip6n->fb_tnl_dev->rtnl_link_ops = &vti6_link_ops;
1023 1034
1024 err = vti6_fb_tnl_dev_init(ip6n->fb_tnl_dev); 1035 err = vti6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
1025 if (err < 0) 1036 if (err < 0)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index ace29b60813c..dc495ae2ead0 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -903,7 +903,10 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
903 if (th->rst) 903 if (th->rst)
904 return; 904 return;
905 905
906 if (!ipv6_unicast_destination(skb)) 906 /* If sk not NULL, it means we did a successful lookup and incoming
907 * route had to be correct. prequeue might have dropped our dst.
908 */
909 if (!sk && !ipv6_unicast_destination(skb))
907 return; 910 return;
908 911
909#ifdef CONFIG_TCP_MD5SIG 912#ifdef CONFIG_TCP_MD5SIG
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 2c699757bccf..5016a6929085 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -611,16 +611,12 @@ __nf_conntrack_confirm(struct sk_buff *skb)
611 */ 611 */
612 NF_CT_ASSERT(!nf_ct_is_confirmed(ct)); 612 NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
613 pr_debug("Confirming conntrack %p\n", ct); 613 pr_debug("Confirming conntrack %p\n", ct);
614 614 /* We have to check the DYING flag inside the lock to prevent
615 /* We have to check the DYING flag after unlink to prevent 615 a race against nf_ct_get_next_corpse() possibly called from
616 * a race against nf_ct_get_next_corpse() possibly called from 616 user context, else we insert an already 'dead' hash, blocking
617 * user context, else we insert an already 'dead' hash, blocking 617 further use of that particular connection -JM */
618 * further use of that particular connection -JM.
619 */
620 nf_ct_del_from_dying_or_unconfirmed_list(ct);
621 618
622 if (unlikely(nf_ct_is_dying(ct))) { 619 if (unlikely(nf_ct_is_dying(ct))) {
623 nf_ct_add_to_dying_list(ct);
624 nf_conntrack_double_unlock(hash, reply_hash); 620 nf_conntrack_double_unlock(hash, reply_hash);
625 local_bh_enable(); 621 local_bh_enable();
626 return NF_ACCEPT; 622 return NF_ACCEPT;
@@ -640,6 +636,8 @@ __nf_conntrack_confirm(struct sk_buff *skb)
640 zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h))) 636 zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
641 goto out; 637 goto out;
642 638
639 nf_ct_del_from_dying_or_unconfirmed_list(ct);
640
643 /* Timer relative to confirmation time, not original 641 /* Timer relative to confirmation time, not original
644 setting time, otherwise we'd get timer wrap in 642 setting time, otherwise we'd get timer wrap in
645 weird delay cases. */ 643 weird delay cases. */
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 87d20f48ff06..07c04a841ba0 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -378,7 +378,7 @@ static void unregister_prot_hook(struct sock *sk, bool sync)
378 __unregister_prot_hook(sk, sync); 378 __unregister_prot_hook(sk, sync);
379} 379}
380 380
381static inline __pure struct page *pgv_to_page(void *addr) 381static inline struct page * __pure pgv_to_page(void *addr)
382{ 382{
383 if (is_vmalloc_addr(addr)) 383 if (is_vmalloc_addr(addr))
384 return vmalloc_to_page(addr); 384 return vmalloc_to_page(addr);
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 3f959c681885..f9c052d508f0 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1019,17 +1019,12 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
1019 xid = *p++; 1019 xid = *p++;
1020 calldir = *p; 1020 calldir = *p;
1021 1021
1022 if (bc_xprt) 1022 if (!bc_xprt)
1023 req = xprt_lookup_rqst(bc_xprt, xid);
1024
1025 if (!req) {
1026 printk(KERN_NOTICE
1027 "%s: Got unrecognized reply: "
1028 "calldir 0x%x xpt_bc_xprt %p xid %08x\n",
1029 __func__, ntohl(calldir),
1030 bc_xprt, ntohl(xid));
1031 return -EAGAIN; 1023 return -EAGAIN;
1032 } 1024 spin_lock_bh(&bc_xprt->transport_lock);
1025 req = xprt_lookup_rqst(bc_xprt, xid);
1026 if (!req)
1027 goto unlock_notfound;
1033 1028
1034 memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf)); 1029 memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf));
1035 /* 1030 /*
@@ -1040,11 +1035,21 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
1040 dst = &req->rq_private_buf.head[0]; 1035 dst = &req->rq_private_buf.head[0];
1041 src = &rqstp->rq_arg.head[0]; 1036 src = &rqstp->rq_arg.head[0];
1042 if (dst->iov_len < src->iov_len) 1037 if (dst->iov_len < src->iov_len)
1043 return -EAGAIN; /* whatever; just giving up. */ 1038 goto unlock_eagain; /* whatever; just giving up. */
1044 memcpy(dst->iov_base, src->iov_base, src->iov_len); 1039 memcpy(dst->iov_base, src->iov_base, src->iov_len);
1045 xprt_complete_rqst(req->rq_task, rqstp->rq_arg.len); 1040 xprt_complete_rqst(req->rq_task, rqstp->rq_arg.len);
1046 rqstp->rq_arg.len = 0; 1041 rqstp->rq_arg.len = 0;
1042 spin_unlock_bh(&bc_xprt->transport_lock);
1047 return 0; 1043 return 0;
1044unlock_notfound:
1045 printk(KERN_NOTICE
1046 "%s: Got unrecognized reply: "
1047 "calldir 0x%x xpt_bc_xprt %p xid %08x\n",
1048 __func__, ntohl(calldir),
1049 bc_xprt, ntohl(xid));
1050unlock_eagain:
1051 spin_unlock_bh(&bc_xprt->transport_lock);
1052 return -EAGAIN;
1048} 1053}
1049 1054
1050static int copy_pages_to_kvecs(struct kvec *vec, struct page **pages, int len) 1055static int copy_pages_to_kvecs(struct kvec *vec, struct page **pages, int len)
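
The svcsock fix takes bc_xprt->transport_lock around the whole lookup-and-complete sequence, so the backchannel request cannot be retried or freed between xprt_lookup_rqst() and xprt_complete_rqst(), and it funnels every failure path through labels that drop the lock. The shape of that lock/goto error handling, with a hypothetical helper:

#include <linux/printk.h>
#include <linux/sunrpc/xprt.h>

static int ex_complete_bc_reply(struct rpc_xprt *bc_xprt, __be32 xid, int len)
{
        struct rpc_rqst *req;

        spin_lock_bh(&bc_xprt->transport_lock);
        req = xprt_lookup_rqst(bc_xprt, xid);
        if (!req)
                goto unlock_notfound;

        /* copy the reply into req->rq_private_buf here, still under the lock */
        xprt_complete_rqst(req->rq_task, len);
        spin_unlock_bh(&bc_xprt->transport_lock);
        return 0;

unlock_notfound:
        printk(KERN_NOTICE "%s: unrecognized reply xid %08x\n",
               __func__, ntohl(xid));
        spin_unlock_bh(&bc_xprt->transport_lock);
        return -EAGAIN;
}
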
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index 42ded997b223..c6ff94ab1ad6 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -216,6 +216,8 @@ static char *snd_pcm_format_names[] = {
216 FORMAT(DSD_U8), 216 FORMAT(DSD_U8),
217 FORMAT(DSD_U16_LE), 217 FORMAT(DSD_U16_LE),
218 FORMAT(DSD_U32_LE), 218 FORMAT(DSD_U32_LE),
219 FORMAT(DSD_U16_BE),
220 FORMAT(DSD_U32_BE),
219}; 221};
220 222
221const char *snd_pcm_format_name(snd_pcm_format_t format) 223const char *snd_pcm_format_name(snd_pcm_format_t format)
diff --git a/sound/core/pcm_misc.c b/sound/core/pcm_misc.c
index ae7a0feb3b76..ebe8444de6c6 100644
--- a/sound/core/pcm_misc.c
+++ b/sound/core/pcm_misc.c
@@ -152,6 +152,14 @@ static struct pcm_format_data pcm_formats[(INT)SNDRV_PCM_FORMAT_LAST+1] = {
152 .width = 32, .phys = 32, .le = 1, .signd = 0, 152 .width = 32, .phys = 32, .le = 1, .signd = 0,
153 .silence = { 0x69, 0x69, 0x69, 0x69 }, 153 .silence = { 0x69, 0x69, 0x69, 0x69 },
154 }, 154 },
155 [SNDRV_PCM_FORMAT_DSD_U16_BE] = {
156 .width = 16, .phys = 16, .le = 0, .signd = 0,
157 .silence = { 0x69, 0x69 },
158 },
159 [SNDRV_PCM_FORMAT_DSD_U32_BE] = {
160 .width = 32, .phys = 32, .le = 0, .signd = 0,
161 .silence = { 0x69, 0x69, 0x69, 0x69 },
162 },
155 /* FIXME: the following three formats are not defined properly yet */ 163 /* FIXME: the following three formats are not defined properly yet */
156 [SNDRV_PCM_FORMAT_MPEG] = { 164 [SNDRV_PCM_FORMAT_MPEG] = {
157 .le = -1, .signd = -1, 165 .le = -1, .signd = -1,
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 16660f312043..48b6c5a3884f 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -298,7 +298,8 @@ enum {
298 298
299/* quirks for ATI/AMD HDMI */ 299/* quirks for ATI/AMD HDMI */
300#define AZX_DCAPS_PRESET_ATI_HDMI \ 300#define AZX_DCAPS_PRESET_ATI_HDMI \
301 (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB) 301 (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB|\
302 AZX_DCAPS_NO_MSI64)
302 303
303/* quirks for Nvidia */ 304/* quirks for Nvidia */
304#define AZX_DCAPS_PRESET_NVIDIA \ 305#define AZX_DCAPS_PRESET_NVIDIA \
@@ -1486,6 +1487,7 @@ static int azx_first_init(struct azx *chip)
1486 struct snd_card *card = chip->card; 1487 struct snd_card *card = chip->card;
1487 int err; 1488 int err;
1488 unsigned short gcap; 1489 unsigned short gcap;
1490 unsigned int dma_bits = 64;
1489 1491
1490#if BITS_PER_LONG != 64 1492#if BITS_PER_LONG != 64
1491 /* Fix up base address on ULI M5461 */ 1493 /* Fix up base address on ULI M5461 */
@@ -1509,9 +1511,14 @@ static int azx_first_init(struct azx *chip)
1509 return -ENXIO; 1511 return -ENXIO;
1510 } 1512 }
1511 1513
1512 if (chip->msi) 1514 if (chip->msi) {
1515 if (chip->driver_caps & AZX_DCAPS_NO_MSI64) {
1516 dev_dbg(card->dev, "Disabling 64bit MSI\n");
1517 pci->no_64bit_msi = true;
1518 }
1513 if (pci_enable_msi(pci) < 0) 1519 if (pci_enable_msi(pci) < 0)
1514 chip->msi = 0; 1520 chip->msi = 0;
1521 }
1515 1522
1516 if (azx_acquire_irq(chip, 0) < 0) 1523 if (azx_acquire_irq(chip, 0) < 0)
1517 return -EBUSY; 1524 return -EBUSY;
@@ -1522,9 +1529,14 @@ static int azx_first_init(struct azx *chip)
1522 gcap = azx_readw(chip, GCAP); 1529 gcap = azx_readw(chip, GCAP);
1523 dev_dbg(card->dev, "chipset global capabilities = 0x%x\n", gcap); 1530 dev_dbg(card->dev, "chipset global capabilities = 0x%x\n", gcap);
1524 1531
1532 /* AMD devices support 40 or 48bit DMA, take the safe one */
1533 if (chip->pci->vendor == PCI_VENDOR_ID_AMD)
1534 dma_bits = 40;
1535
1525 /* disable SB600 64bit support for safety */ 1536 /* disable SB600 64bit support for safety */
1526 if (chip->pci->vendor == PCI_VENDOR_ID_ATI) { 1537 if (chip->pci->vendor == PCI_VENDOR_ID_ATI) {
1527 struct pci_dev *p_smbus; 1538 struct pci_dev *p_smbus;
1539 dma_bits = 40;
1528 p_smbus = pci_get_device(PCI_VENDOR_ID_ATI, 1540 p_smbus = pci_get_device(PCI_VENDOR_ID_ATI,
1529 PCI_DEVICE_ID_ATI_SBX00_SMBUS, 1541 PCI_DEVICE_ID_ATI_SBX00_SMBUS,
1530 NULL); 1542 NULL);
@@ -1554,9 +1566,11 @@ static int azx_first_init(struct azx *chip)
1554 } 1566 }
1555 1567
1556 /* allow 64bit DMA address if supported by H/W */ 1568 /* allow 64bit DMA address if supported by H/W */
1557 if ((gcap & AZX_GCAP_64OK) && !pci_set_dma_mask(pci, DMA_BIT_MASK(64))) 1569 if (!(gcap & AZX_GCAP_64OK))
1558 pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(64)); 1570 dma_bits = 32;
1559 else { 1571 if (!pci_set_dma_mask(pci, DMA_BIT_MASK(dma_bits))) {
1572 pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(dma_bits));
1573 } else {
1560 pci_set_dma_mask(pci, DMA_BIT_MASK(32)); 1574 pci_set_dma_mask(pci, DMA_BIT_MASK(32));
1561 pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(32)); 1575 pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(32));
1562 } 1576 }
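
The DMA-mask rework picks a width once (64 bits by default, 40 for AMD/ATI parts, 32 when the controller lacks AZX_GCAP_64OK) and then tries that mask with a 32-bit fallback; together with pci->no_64bit_msi it keeps both DMA and MSI addresses inside what these chipsets actually decode. The selection boiled down to its core, as a hypothetical helper taking the same inputs:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static void ex_set_dma_mask(struct pci_dev *pci, bool hw_64ok, bool amd_or_ati)
{
        unsigned int dma_bits = 64;

        if (amd_or_ati)
                dma_bits = 40;  /* 40 is the safe width for these chipsets */
        if (!hw_64ok)
                dma_bits = 32;

        if (!pci_set_dma_mask(pci, DMA_BIT_MASK(dma_bits))) {
                pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(dma_bits));
        } else {
                pci_set_dma_mask(pci, DMA_BIT_MASK(32));
                pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(32));
        }
}
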
diff --git a/sound/pci/hda/hda_priv.h b/sound/pci/hda/hda_priv.h
index 949cd437eeb2..5016014e57f2 100644
--- a/sound/pci/hda/hda_priv.h
+++ b/sound/pci/hda/hda_priv.h
@@ -171,6 +171,7 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
171#define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */ 171#define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */
172#define AZX_DCAPS_I915_POWERWELL (1 << 27) /* HSW i915 powerwell support */ 172#define AZX_DCAPS_I915_POWERWELL (1 << 27) /* HSW i915 powerwell support */
173#define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28) /* CORBRP clears itself after reset */ 173#define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28) /* CORBRP clears itself after reset */
174#define AZX_DCAPS_NO_MSI64 (1 << 29) /* Stick to 32-bit MSIs */
174 175
175/* HD Audio class code */ 176/* HD Audio class code */
176#define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403 177#define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 8fea1b86df25..14f16be3f374 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4818,7 +4818,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4818 SND_PCI_QUIRK(0x103c, 0x221b, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), 4818 SND_PCI_QUIRK(0x103c, 0x221b, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
4819 SND_PCI_QUIRK(0x103c, 0x2221, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), 4819 SND_PCI_QUIRK(0x103c, 0x2221, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
4820 SND_PCI_QUIRK(0x103c, 0x2225, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), 4820 SND_PCI_QUIRK(0x103c, 0x2225, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
4821 SND_PCI_QUIRK(0x103c, 0x2246, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
4822 SND_PCI_QUIRK(0x103c, 0x2253, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), 4821 SND_PCI_QUIRK(0x103c, 0x2253, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
4823 SND_PCI_QUIRK(0x103c, 0x2254, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), 4822 SND_PCI_QUIRK(0x103c, 0x2254, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
4824 SND_PCI_QUIRK(0x103c, 0x2255, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), 4823 SND_PCI_QUIRK(0x103c, 0x2255, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 7c83bab69dee..8c9bf4b7aaf0 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -593,10 +593,10 @@ static int snd_nativeinstruments_control_get(struct snd_kcontrol *kcontrol,
593 if (mixer->chip->shutdown) 593 if (mixer->chip->shutdown)
594 ret = -ENODEV; 594 ret = -ENODEV;
595 else 595 else
596 ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest, 596 ret = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest,
597 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 597 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
598 0, wIndex, 598 0, wIndex,
599 &tmp, sizeof(tmp), 1000); 599 &tmp, sizeof(tmp));
600 up_read(&mixer->chip->shutdown_rwsem); 600 up_read(&mixer->chip->shutdown_rwsem);
601 601
602 if (ret < 0) { 602 if (ret < 0) {
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index a5941f80fc5b..60dfe0d28771 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1193,12 +1193,12 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
1193 /* iFi Audio micro/nano iDSD */ 1193 /* iFi Audio micro/nano iDSD */
1194 case USB_ID(0x20b1, 0x3008): 1194 case USB_ID(0x20b1, 0x3008):
1195 if (fp->altsetting == 2) 1195 if (fp->altsetting == 2)
1196 return SNDRV_PCM_FMTBIT_DSD_U32_LE; 1196 return SNDRV_PCM_FMTBIT_DSD_U32_BE;
1197 break; 1197 break;
1198 /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */ 1198 /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */
1199 case USB_ID(0x20b1, 0x2009): 1199 case USB_ID(0x20b1, 0x2009):
1200 if (fp->altsetting == 3) 1200 if (fp->altsetting == 3)
1201 return SNDRV_PCM_FMTBIT_DSD_U32_LE; 1201 return SNDRV_PCM_FMTBIT_DSD_U32_BE;
1202 break; 1202 break;
1203 default: 1203 default:
1204 break; 1204 break;
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 3aaca49de325..aacdb59f30de 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1933,7 +1933,7 @@ out:
1933 1933
1934int kvm_vgic_create(struct kvm *kvm) 1934int kvm_vgic_create(struct kvm *kvm)
1935{ 1935{
1936 int i, vcpu_lock_idx = -1, ret = 0; 1936 int i, vcpu_lock_idx = -1, ret;
1937 struct kvm_vcpu *vcpu; 1937 struct kvm_vcpu *vcpu;
1938 1938
1939 mutex_lock(&kvm->lock); 1939 mutex_lock(&kvm->lock);
@@ -1948,6 +1948,7 @@ int kvm_vgic_create(struct kvm *kvm)
1948 * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure 1948 * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
1949 * that no other VCPUs are run while we create the vgic. 1949 * that no other VCPUs are run while we create the vgic.
1950 */ 1950 */
1951 ret = -EBUSY;
1951 kvm_for_each_vcpu(i, vcpu, kvm) { 1952 kvm_for_each_vcpu(i, vcpu, kvm) {
1952 if (!mutex_trylock(&vcpu->mutex)) 1953 if (!mutex_trylock(&vcpu->mutex))
1953 goto out_unlock; 1954 goto out_unlock;
@@ -1955,11 +1956,10 @@ int kvm_vgic_create(struct kvm *kvm)
1955 } 1956 }
1956 1957
1957 kvm_for_each_vcpu(i, vcpu, kvm) { 1958 kvm_for_each_vcpu(i, vcpu, kvm) {
1958 if (vcpu->arch.has_run_once) { 1959 if (vcpu->arch.has_run_once)
1959 ret = -EBUSY;
1960 goto out_unlock; 1960 goto out_unlock;
1961 }
1962 } 1961 }
1962 ret = 0;
1963 1963
1964 spin_lock_init(&kvm->arch.vgic.lock); 1964 spin_lock_init(&kvm->arch.vgic.lock);
1965 kvm->arch.vgic.in_kernel = true; 1965 kvm->arch.vgic.in_kernel = true;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 25ffac9e947d..3cee7b167052 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -107,10 +107,10 @@ EXPORT_SYMBOL_GPL(kvm_rebooting);
107 107
108static bool largepages_enabled = true; 108static bool largepages_enabled = true;
109 109
110bool kvm_is_mmio_pfn(pfn_t pfn) 110bool kvm_is_reserved_pfn(pfn_t pfn)
111{ 111{
112 if (pfn_valid(pfn)) 112 if (pfn_valid(pfn))
113 return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)); 113 return PageReserved(pfn_to_page(pfn));
114 114
115 return true; 115 return true;
116} 116}
@@ -1321,7 +1321,7 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
1321 else if ((vma->vm_flags & VM_PFNMAP)) { 1321 else if ((vma->vm_flags & VM_PFNMAP)) {
1322 pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + 1322 pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
1323 vma->vm_pgoff; 1323 vma->vm_pgoff;
1324 BUG_ON(!kvm_is_mmio_pfn(pfn)); 1324 BUG_ON(!kvm_is_reserved_pfn(pfn));
1325 } else { 1325 } else {
1326 if (async && vma_is_valid(vma, write_fault)) 1326 if (async && vma_is_valid(vma, write_fault))
1327 *async = true; 1327 *async = true;
@@ -1427,7 +1427,7 @@ static struct page *kvm_pfn_to_page(pfn_t pfn)
1427 if (is_error_noslot_pfn(pfn)) 1427 if (is_error_noslot_pfn(pfn))
1428 return KVM_ERR_PTR_BAD_PAGE; 1428 return KVM_ERR_PTR_BAD_PAGE;
1429 1429
1430 if (kvm_is_mmio_pfn(pfn)) { 1430 if (kvm_is_reserved_pfn(pfn)) {
1431 WARN_ON(1); 1431 WARN_ON(1);
1432 return KVM_ERR_PTR_BAD_PAGE; 1432 return KVM_ERR_PTR_BAD_PAGE;
1433 } 1433 }
@@ -1456,7 +1456,7 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
1456 1456
1457void kvm_release_pfn_clean(pfn_t pfn) 1457void kvm_release_pfn_clean(pfn_t pfn)
1458{ 1458{
1459 if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn)) 1459 if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
1460 put_page(pfn_to_page(pfn)); 1460 put_page(pfn_to_page(pfn));
1461} 1461}
1462EXPORT_SYMBOL_GPL(kvm_release_pfn_clean); 1462EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
@@ -1477,7 +1477,7 @@ static void kvm_release_pfn_dirty(pfn_t pfn)
1477 1477
1478void kvm_set_pfn_dirty(pfn_t pfn) 1478void kvm_set_pfn_dirty(pfn_t pfn)
1479{ 1479{
1480 if (!kvm_is_mmio_pfn(pfn)) { 1480 if (!kvm_is_reserved_pfn(pfn)) {
1481 struct page *page = pfn_to_page(pfn); 1481 struct page *page = pfn_to_page(pfn);
1482 if (!PageReserved(page)) 1482 if (!PageReserved(page))
1483 SetPageDirty(page); 1483 SetPageDirty(page);
@@ -1487,14 +1487,14 @@ EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
1487 1487
1488void kvm_set_pfn_accessed(pfn_t pfn) 1488void kvm_set_pfn_accessed(pfn_t pfn)
1489{ 1489{
1490 if (!kvm_is_mmio_pfn(pfn)) 1490 if (!kvm_is_reserved_pfn(pfn))
1491 mark_page_accessed(pfn_to_page(pfn)); 1491 mark_page_accessed(pfn_to_page(pfn));
1492} 1492}
1493EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed); 1493EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
1494 1494
1495void kvm_get_pfn(pfn_t pfn) 1495void kvm_get_pfn(pfn_t pfn)
1496{ 1496{
1497 if (!kvm_is_mmio_pfn(pfn)) 1497 if (!kvm_is_reserved_pfn(pfn))
1498 get_page(pfn_to_page(pfn)); 1498 get_page(pfn_to_page(pfn));
1499} 1499}
1500EXPORT_SYMBOL_GPL(kvm_get_pfn); 1500EXPORT_SYMBOL_GPL(kvm_get_pfn);
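
The rename from kvm_is_mmio_pfn() to kvm_is_reserved_pfn() tracks what the predicate now answers: "is this a reserved page KVM must not refcount or dirty?", which is broader than "is this MMIO?" (the zero page, for instance, is reserved but not MMIO, and the old !is_zero_pfn() special case is dropped accordingly). Call sites gate refcount and dirty/accessed updates on it; a hypothetical caller following the same pattern as the hunks above:

#include <linux/kvm_host.h>

static void ex_note_guest_write(pfn_t pfn)
{
        if (is_error_noslot_pfn(pfn) || kvm_is_reserved_pfn(pfn))
                return;                 /* reserved: nothing to account for */

        kvm_set_pfn_dirty(pfn);         /* safe: backed by a normal page */
        kvm_set_pfn_accessed(pfn);
}
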