author		Olof Johansson <olof@lixom.net>	2014-11-04 23:37:25 -0500
committer	Olof Johansson <olof@lixom.net>	2014-11-04 23:37:25 -0500
commit		83b3d538db83fe37e24b46befa699a4ae8c496f2 (patch)
tree		71141d9e170e9f489db186c640ef2a3abf7f1c18 /arch
parent		4257412db57900e43716d0b7ddd4f4a51e6ed2f4 (diff)
parent		89fbec5b97fbcf08db3a9cd93a340f21f95d38b8 (diff)
Merge tag 'imx-fixes-3.18-2' of git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux into fixes
Merge "ARM: imx: fixes for 3.18, 2nd round" from Shawn Guo: "This is the second round of i.MX fixes for 3.18. The clk-vf610 fix is relatively big, because it needs some adaption to the change made by offending commit dc4805c2e78b (ARM: imx: remove ENABLE and BYPASS bits from clk-pllv3 driver). And it should have been sent to you for earlier -rc inclusion, but unfortunately it got delayed for some time because Stefan wasn't aware of my email address change." The i.MX fixes for 3.18, 2nd round: - Fix a regression on Vybrid platform which is caused by commit dc4805c2e78b (ARM: imx: remove ENABLE and BYPASS bits from clk-pllv3 driver), and results in a missing configuration on PLL clocks. - Fix a regression with i.MX defconfig files where CONFIG_SPI option gets lost accidentally. * tag 'imx-fixes-3.18-2' of git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux: (460 commits) ARM: imx: Fix the removal of CONFIG_SPI option ARM: imx: clk-vf610: define PLL's clock tree + Linux 3.18-rc3 Signed-off-by: Olof Johansson <olof@lixom.net>
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/boot/dts/omap3-n900.dts | 2
-rw-r--r--  arch/arm/configs/imx_v4_v5_defconfig | 1
-rw-r--r--  arch/arm/configs/imx_v6_v7_defconfig | 1
-rw-r--r--  arch/arm/include/uapi/asm/unistd.h | 1
-rw-r--r--  arch/arm/kernel/asm-offsets.c | 12
-rw-r--r--  arch/arm/kernel/calls.S | 1
-rw-r--r--  arch/arm/mach-imx/clk-vf610.c | 134
-rw-r--r--  arch/arm/mach-omap2/pdata-quirks.c | 3
-rw-r--r--  arch/arm/mm/cache-l2x0.c | 26
-rw-r--r--  arch/arm/mm/dma-mapping.c | 1
-rw-r--r--  arch/arm/mm/highmem.c | 3
-rw-r--r--  arch/arm/mm/init.c | 8
-rw-r--r--  arch/microblaze/Kconfig | 4
-rw-r--r--  arch/microblaze/include/asm/unistd.h | 2
-rw-r--r--  arch/microblaze/include/uapi/asm/unistd.h | 1
-rw-r--r--  arch/microblaze/kernel/syscall_table.S | 1
-rw-r--r--  arch/microblaze/pci/pci-common.c | 9
-rw-r--r--  arch/powerpc/include/asm/hugetlb.h | 2
-rw-r--r--  arch/powerpc/include/asm/systbl.h | 1
-rw-r--r--  arch/powerpc/include/asm/unistd.h | 2
-rw-r--r--  arch/powerpc/include/uapi/asm/unistd.h | 1
-rw-r--r--  arch/powerpc/mm/copro_fault.c | 3
-rw-r--r--  arch/powerpc/mm/numa.c | 7
-rw-r--r--  arch/powerpc/mm/slice.c | 3
-rw-r--r--  arch/powerpc/perf/hv-24x7.c | 6
-rw-r--r--  arch/powerpc/perf/hv-gpci.c | 6
-rw-r--r--  arch/powerpc/platforms/powernv/opal-lpc.c | 4
-rw-r--r--  arch/powerpc/platforms/powernv/opal-wrappers.S | 2
-rw-r--r--  arch/s390/kernel/perf_cpum_sf.c | 6
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh770x.c | 6
-rw-r--r--  arch/sparc/include/uapi/asm/unistd.h | 3
-rw-r--r--  arch/sparc/kernel/systbls_32.S | 2
-rw-r--r--  arch/sparc/kernel/systbls_64.S | 4
-rw-r--r--  arch/x86/Kconfig | 4
-rw-r--r--  arch/x86/ia32/ia32entry.S | 2
-rw-r--r--  arch/x86/include/asm/preempt.h | 1
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 16
-rw-r--r--  arch/x86/kernel/apb_timer.c | 2
-rw-r--r--  arch/x86/kernel/apic/apic.c | 4
-rw-r--r--  arch/x86/kernel/cpu/Makefile | 7
-rw-r--r--  arch/x86/kernel/cpu/intel.c | 5
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c | 14
-rw-r--r--  arch/x86/kernel/cpu/perf_event.h | 1
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 173
-rw-r--r--  arch/x86/kernel/entry_32.S | 15
-rw-r--r--  arch/x86/kernel/i8259.c | 3
-rw-r--r--  arch/x86/kernel/irqinit.c | 3
-rw-r--r--  arch/x86/kernel/setup.c | 2
-rw-r--r--  arch/x86/kernel/smpboot.c | 4
-rw-r--r--  arch/x86/kernel/tsc.c | 5
-rw-r--r--  arch/x86/kvm/emulate.c | 55
-rw-r--r--  arch/x86/kvm/vmx.c | 6
-rw-r--r--  arch/x86/mm/pageattr.c | 2
-rw-r--r--  arch/x86/platform/intel-mid/sfi.c | 2
54 files changed, 265 insertions, 329 deletions
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 739fcf29c643..bc82a12d4c2c 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -668,6 +668,8 @@
 	bank-width = <2>;
 	pinctrl-names = "default";
 	pinctrl-0 = <&ethernet_pins>;
+	power-gpios = <&gpio3 22 GPIO_ACTIVE_HIGH>; /* gpio86 */
+	reset-gpios = <&gpio6 4 GPIO_ACTIVE_HIGH>; /* gpio164 */
 	gpmc,device-width = <2>;
 	gpmc,sync-clk-ps = <0>;
 	gpmc,cs-on-ns = <0>;
diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig
index e688741c89aa..e6b0007355f8 100644
--- a/arch/arm/configs/imx_v4_v5_defconfig
+++ b/arch/arm/configs/imx_v4_v5_defconfig
@@ -97,6 +97,7 @@ CONFIG_SERIAL_IMX_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
 CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_IMX=y
+CONFIG_SPI=y
 CONFIG_SPI_IMX=y
 CONFIG_SPI_SPIDEV=y
 CONFIG_GPIO_SYSFS=y
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index 8fca6e276b69..6790f1b3f3a1 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -158,6 +158,7 @@ CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_ALGOPCF=m
 CONFIG_I2C_ALGOPCA=m
 CONFIG_I2C_IMX=y
+CONFIG_SPI=y
 CONFIG_SPI_IMX=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_GPIO_MC9S08DZ60=y
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index 3aaa75cae90c..705bb7620673 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -412,6 +412,7 @@
 #define __NR_seccomp (__NR_SYSCALL_BASE+383)
 #define __NR_getrandom (__NR_SYSCALL_BASE+384)
 #define __NR_memfd_create (__NR_SYSCALL_BASE+385)
+#define __NR_bpf (__NR_SYSCALL_BASE+386)
 
 /*
  * The following SWIs are ARM private.
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 713e807621d2..2d2d6087b9b1 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -10,6 +10,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <linux/compiler.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
@@ -39,10 +40,19 @@
  * GCC 3.2.x: miscompiles NEW_AUX_ENT in fs/binfmt_elf.c
  * (http://gcc.gnu.org/PR8896) and incorrect structure
  * initialisation in fs/jffs2/erase.c
+ * GCC 4.8.0-4.8.2: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58854
+ * miscompiles find_get_entry(), and can result in EXT3 and EXT4
+ * filesystem corruption (possibly other FS too).
  */
+#ifdef __GNUC__
 #if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
 #error Your compiler is too buggy; it is known to miscompile kernels.
-#error Known good compilers: 3.3
+#error Known good compilers: 3.3, 4.x
+#endif
+#if GCC_VERSION >= 40800 && GCC_VERSION < 40803
+#error Your compiler is too buggy; it is known to miscompile kernels
+#error and result in filesystem corruption and oopses.
+#endif
 #endif
 
 int main(void)
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 9f899d8fdcca..e51833f8cc38 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -395,6 +395,7 @@
 		CALL(sys_seccomp)
 		CALL(sys_getrandom)
 /* 385 */	CALL(sys_memfd_create)
+		CALL(sys_bpf)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
diff --git a/arch/arm/mach-imx/clk-vf610.c b/arch/arm/mach-imx/clk-vf610.c
index a17818475050..409637254594 100644
--- a/arch/arm/mach-imx/clk-vf610.c
+++ b/arch/arm/mach-imx/clk-vf610.c
@@ -58,8 +58,14 @@
 #define PFD_PLL1_BASE (anatop_base + 0x2b0)
 #define PFD_PLL2_BASE (anatop_base + 0x100)
 #define PFD_PLL3_BASE (anatop_base + 0xf0)
+#define PLL1_CTRL (anatop_base + 0x270)
+#define PLL2_CTRL (anatop_base + 0x30)
 #define PLL3_CTRL (anatop_base + 0x10)
+#define PLL4_CTRL (anatop_base + 0x70)
+#define PLL5_CTRL (anatop_base + 0xe0)
+#define PLL6_CTRL (anatop_base + 0xa0)
 #define PLL7_CTRL (anatop_base + 0x20)
+#define ANA_MISC1 (anatop_base + 0x160)
 
 static void __iomem *anatop_base;
 static void __iomem *ccm_base;
@@ -67,25 +73,34 @@ static void __iomem *ccm_base;
 /* sources for multiplexer clocks, this is used multiple times */
 static const char *fast_sels[] = { "firc", "fxosc", };
 static const char *slow_sels[] = { "sirc_32k", "sxosc", };
-static const char *pll1_sels[] = { "pll1_main", "pll1_pfd1", "pll1_pfd2", "pll1_pfd3", "pll1_pfd4", };
-static const char *pll2_sels[] = { "pll2_main", "pll2_pfd1", "pll2_pfd2", "pll2_pfd3", "pll2_pfd4", };
-static const char *sys_sels[] = { "fast_clk_sel", "slow_clk_sel", "pll2_pfd_sel", "pll2_main", "pll1_pfd_sel", "pll3_main", };
+static const char *pll1_sels[] = { "pll1_sys", "pll1_pfd1", "pll1_pfd2", "pll1_pfd3", "pll1_pfd4", };
+static const char *pll2_sels[] = { "pll2_bus", "pll2_pfd1", "pll2_pfd2", "pll2_pfd3", "pll2_pfd4", };
+static const char *pll_bypass_src_sels[] = { "fast_clk_sel", "lvds1_in", };
+static const char *pll1_bypass_sels[] = { "pll1", "pll1_bypass_src", };
+static const char *pll2_bypass_sels[] = { "pll2", "pll2_bypass_src", };
+static const char *pll3_bypass_sels[] = { "pll3", "pll3_bypass_src", };
+static const char *pll4_bypass_sels[] = { "pll4", "pll4_bypass_src", };
+static const char *pll5_bypass_sels[] = { "pll5", "pll5_bypass_src", };
+static const char *pll6_bypass_sels[] = { "pll6", "pll6_bypass_src", };
+static const char *pll7_bypass_sels[] = { "pll7", "pll7_bypass_src", };
+static const char *sys_sels[] = { "fast_clk_sel", "slow_clk_sel", "pll2_pfd_sel", "pll2_bus", "pll1_pfd_sel", "pll3_usb_otg", };
 static const char *ddr_sels[] = { "pll2_pfd2", "sys_sel", };
 static const char *rmii_sels[] = { "enet_ext", "audio_ext", "enet_50m", "enet_25m", };
 static const char *enet_ts_sels[] = { "enet_ext", "fxosc", "audio_ext", "usb", "enet_ts", "enet_25m", "enet_50m", };
-static const char *esai_sels[] = { "audio_ext", "mlb", "spdif_rx", "pll4_main_div", };
-static const char *sai_sels[] = { "audio_ext", "mlb", "spdif_rx", "pll4_main_div", };
+static const char *esai_sels[] = { "audio_ext", "mlb", "spdif_rx", "pll4_audio_div", };
+static const char *sai_sels[] = { "audio_ext", "mlb", "spdif_rx", "pll4_audio_div", };
 static const char *nfc_sels[] = { "platform_bus", "pll1_pfd1", "pll3_pfd1", "pll3_pfd3", };
-static const char *qspi_sels[] = { "pll3_main", "pll3_pfd4", "pll2_pfd4", "pll1_pfd4", };
-static const char *esdhc_sels[] = { "pll3_main", "pll3_pfd3", "pll1_pfd3", "platform_bus", };
-static const char *dcu_sels[] = { "pll1_pfd2", "pll3_main", };
+static const char *qspi_sels[] = { "pll3_usb_otg", "pll3_pfd4", "pll2_pfd4", "pll1_pfd4", };
+static const char *esdhc_sels[] = { "pll3_usb_otg", "pll3_pfd3", "pll1_pfd3", "platform_bus", };
+static const char *dcu_sels[] = { "pll1_pfd2", "pll3_usb_otg", };
 static const char *gpu_sels[] = { "pll2_pfd2", "pll3_pfd2", };
-static const char *vadc_sels[] = { "pll6_main_div", "pll3_main_div", "pll3_main", };
+static const char *vadc_sels[] = { "pll6_video_div", "pll3_usb_otg_div", "pll3_usb_otg", };
 /* FTM counter clock source, not module clock */
 static const char *ftm_ext_sels[] = {"sirc_128k", "sxosc", "fxosc_half", "audio_ext", };
 static const char *ftm_fix_sels[] = { "sxosc", "ipg_bus", };
 
-static struct clk_div_table pll4_main_div_table[] = {
+
+static struct clk_div_table pll4_audio_div_table[] = {
 	{ .val = 0, .div = 1 },
 	{ .val = 1, .div = 2 },
 	{ .val = 2, .div = 6 },
@@ -120,6 +135,9 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
 	clk[VF610_CLK_AUDIO_EXT] = imx_obtain_fixed_clock("audio_ext", 0);
 	clk[VF610_CLK_ENET_EXT] = imx_obtain_fixed_clock("enet_ext", 0);
 
+	/* Clock source from external clock via LVDs PAD */
+	clk[VF610_CLK_ANACLK1] = imx_obtain_fixed_clock("anaclk1", 0);
+
 	clk[VF610_CLK_FXOSC_HALF] = imx_clk_fixed_factor("fxosc_half", "fxosc", 1, 2);
 
 	np = of_find_compatible_node(NULL, NULL, "fsl,vf610-anatop");
@@ -133,31 +151,63 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
 	clk[VF610_CLK_SLOW_CLK_SEL] = imx_clk_mux("slow_clk_sel", CCM_CCSR, 4, 1, slow_sels, ARRAY_SIZE(slow_sels));
 	clk[VF610_CLK_FASK_CLK_SEL] = imx_clk_mux("fast_clk_sel", CCM_CCSR, 5, 1, fast_sels, ARRAY_SIZE(fast_sels));
 
-	clk[VF610_CLK_PLL1_MAIN] = imx_clk_fixed_factor("pll1_main", "fast_clk_sel", 22, 1);
-	clk[VF610_CLK_PLL1_PFD1] = imx_clk_pfd("pll1_pfd1", "pll1_main", PFD_PLL1_BASE, 0);
-	clk[VF610_CLK_PLL1_PFD2] = imx_clk_pfd("pll1_pfd2", "pll1_main", PFD_PLL1_BASE, 1);
-	clk[VF610_CLK_PLL1_PFD3] = imx_clk_pfd("pll1_pfd3", "pll1_main", PFD_PLL1_BASE, 2);
-	clk[VF610_CLK_PLL1_PFD4] = imx_clk_pfd("pll1_pfd4", "pll1_main", PFD_PLL1_BASE, 3);
-
-	clk[VF610_CLK_PLL2_MAIN] = imx_clk_fixed_factor("pll2_main", "fast_clk_sel", 22, 1);
-	clk[VF610_CLK_PLL2_PFD1] = imx_clk_pfd("pll2_pfd1", "pll2_main", PFD_PLL2_BASE, 0);
-	clk[VF610_CLK_PLL2_PFD2] = imx_clk_pfd("pll2_pfd2", "pll2_main", PFD_PLL2_BASE, 1);
-	clk[VF610_CLK_PLL2_PFD3] = imx_clk_pfd("pll2_pfd3", "pll2_main", PFD_PLL2_BASE, 2);
-	clk[VF610_CLK_PLL2_PFD4] = imx_clk_pfd("pll2_pfd4", "pll2_main", PFD_PLL2_BASE, 3);
-
-	clk[VF610_CLK_PLL3_MAIN] = imx_clk_fixed_factor("pll3_main", "fast_clk_sel", 20, 1);
-	clk[VF610_CLK_PLL3_PFD1] = imx_clk_pfd("pll3_pfd1", "pll3_main", PFD_PLL3_BASE, 0);
-	clk[VF610_CLK_PLL3_PFD2] = imx_clk_pfd("pll3_pfd2", "pll3_main", PFD_PLL3_BASE, 1);
-	clk[VF610_CLK_PLL3_PFD3] = imx_clk_pfd("pll3_pfd3", "pll3_main", PFD_PLL3_BASE, 2);
-	clk[VF610_CLK_PLL3_PFD4] = imx_clk_pfd("pll3_pfd4", "pll3_main", PFD_PLL3_BASE, 3);
-
-	clk[VF610_CLK_PLL4_MAIN] = imx_clk_fixed_factor("pll4_main", "fast_clk_sel", 25, 1);
-	/* Enet pll: fixed 50Mhz */
-	clk[VF610_CLK_PLL5_MAIN] = imx_clk_fixed_factor("pll5_main", "fast_clk_sel", 125, 6);
-	/* pll6: default 960Mhz */
-	clk[VF610_CLK_PLL6_MAIN] = imx_clk_fixed_factor("pll6_main", "fast_clk_sel", 40, 1);
-	/* pll7: USB1 PLL at 480MHz */
-	clk[VF610_CLK_PLL7_MAIN] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7_main", "fast_clk_sel", PLL7_CTRL, 0x2);
+	clk[VF610_CLK_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", PLL1_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+	clk[VF610_CLK_PLL2_BYPASS_SRC] = imx_clk_mux("pll2_bypass_src", PLL2_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+	clk[VF610_CLK_PLL3_BYPASS_SRC] = imx_clk_mux("pll3_bypass_src", PLL3_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+	clk[VF610_CLK_PLL4_BYPASS_SRC] = imx_clk_mux("pll4_bypass_src", PLL4_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+	clk[VF610_CLK_PLL5_BYPASS_SRC] = imx_clk_mux("pll5_bypass_src", PLL5_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+	clk[VF610_CLK_PLL6_BYPASS_SRC] = imx_clk_mux("pll6_bypass_src", PLL6_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+	clk[VF610_CLK_PLL7_BYPASS_SRC] = imx_clk_mux("pll7_bypass_src", PLL7_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+
+	clk[VF610_CLK_PLL1] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll1", "pll1_bypass_src", PLL1_CTRL, 0x1);
+	clk[VF610_CLK_PLL2] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2", "pll2_bypass_src", PLL2_CTRL, 0x1);
+	clk[VF610_CLK_PLL3] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3", "pll3_bypass_src", PLL3_CTRL, 0x1);
+	clk[VF610_CLK_PLL4] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4", "pll4_bypass_src", PLL4_CTRL, 0x7f);
+	clk[VF610_CLK_PLL5] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll5", "pll5_bypass_src", PLL5_CTRL, 0x3);
+	clk[VF610_CLK_PLL6] = imx_clk_pllv3(IMX_PLLV3_AV, "pll6", "pll6_bypass_src", PLL6_CTRL, 0x7f);
+	clk[VF610_CLK_PLL7] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7", "pll7_bypass_src", PLL7_CTRL, 0x1);
+
+	clk[VF610_PLL1_BYPASS] = imx_clk_mux_flags("pll1_bypass", PLL1_CTRL, 16, 1, pll1_bypass_sels, ARRAY_SIZE(pll1_bypass_sels), CLK_SET_RATE_PARENT);
+	clk[VF610_PLL2_BYPASS] = imx_clk_mux_flags("pll2_bypass", PLL2_CTRL, 16, 1, pll2_bypass_sels, ARRAY_SIZE(pll2_bypass_sels), CLK_SET_RATE_PARENT);
+	clk[VF610_PLL3_BYPASS] = imx_clk_mux_flags("pll3_bypass", PLL3_CTRL, 16, 1, pll3_bypass_sels, ARRAY_SIZE(pll3_bypass_sels), CLK_SET_RATE_PARENT);
+	clk[VF610_PLL4_BYPASS] = imx_clk_mux_flags("pll4_bypass", PLL4_CTRL, 16, 1, pll4_bypass_sels, ARRAY_SIZE(pll4_bypass_sels), CLK_SET_RATE_PARENT);
+	clk[VF610_PLL5_BYPASS] = imx_clk_mux_flags("pll5_bypass", PLL5_CTRL, 16, 1, pll5_bypass_sels, ARRAY_SIZE(pll5_bypass_sels), CLK_SET_RATE_PARENT);
+	clk[VF610_PLL6_BYPASS] = imx_clk_mux_flags("pll6_bypass", PLL6_CTRL, 16, 1, pll6_bypass_sels, ARRAY_SIZE(pll6_bypass_sels), CLK_SET_RATE_PARENT);
+	clk[VF610_PLL7_BYPASS] = imx_clk_mux_flags("pll7_bypass", PLL7_CTRL, 16, 1, pll7_bypass_sels, ARRAY_SIZE(pll7_bypass_sels), CLK_SET_RATE_PARENT);
+
+	/* Do not bypass PLLs initially */
+	clk_set_parent(clk[VF610_PLL1_BYPASS], clk[VF610_CLK_PLL1]);
+	clk_set_parent(clk[VF610_PLL2_BYPASS], clk[VF610_CLK_PLL2]);
+	clk_set_parent(clk[VF610_PLL3_BYPASS], clk[VF610_CLK_PLL3]);
+	clk_set_parent(clk[VF610_PLL4_BYPASS], clk[VF610_CLK_PLL4]);
+	clk_set_parent(clk[VF610_PLL5_BYPASS], clk[VF610_CLK_PLL5]);
+	clk_set_parent(clk[VF610_PLL6_BYPASS], clk[VF610_CLK_PLL6]);
+	clk_set_parent(clk[VF610_PLL7_BYPASS], clk[VF610_CLK_PLL7]);
+
+	clk[VF610_CLK_PLL1_SYS] = imx_clk_gate("pll1_sys", "pll1_bypass", PLL1_CTRL, 13);
+	clk[VF610_CLK_PLL2_BUS] = imx_clk_gate("pll2_bus", "pll2_bypass", PLL2_CTRL, 13);
+	clk[VF610_CLK_PLL3_USB_OTG] = imx_clk_gate("pll3_usb_otg", "pll3_bypass", PLL3_CTRL, 13);
+	clk[VF610_CLK_PLL4_AUDIO] = imx_clk_gate("pll4_audio", "pll4_bypass", PLL4_CTRL, 13);
+	clk[VF610_CLK_PLL5_ENET] = imx_clk_gate("pll5_enet", "pll5_bypass", PLL5_CTRL, 13);
+	clk[VF610_CLK_PLL6_VIDEO] = imx_clk_gate("pll6_video", "pll6_bypass", PLL6_CTRL, 13);
+	clk[VF610_CLK_PLL7_USB_HOST] = imx_clk_gate("pll7_usb_host", "pll7_bypass", PLL7_CTRL, 13);
+
+	clk[VF610_CLK_LVDS1_IN] = imx_clk_gate_exclusive("lvds1_in", "anaclk1", ANA_MISC1, 12, BIT(10));
+
+	clk[VF610_CLK_PLL1_PFD1] = imx_clk_pfd("pll1_pfd1", "pll1_sys", PFD_PLL1_BASE, 0);
+	clk[VF610_CLK_PLL1_PFD2] = imx_clk_pfd("pll1_pfd2", "pll1_sys", PFD_PLL1_BASE, 1);
+	clk[VF610_CLK_PLL1_PFD3] = imx_clk_pfd("pll1_pfd3", "pll1_sys", PFD_PLL1_BASE, 2);
+	clk[VF610_CLK_PLL1_PFD4] = imx_clk_pfd("pll1_pfd4", "pll1_sys", PFD_PLL1_BASE, 3);
+
+	clk[VF610_CLK_PLL2_PFD1] = imx_clk_pfd("pll2_pfd1", "pll2_bus", PFD_PLL2_BASE, 0);
+	clk[VF610_CLK_PLL2_PFD2] = imx_clk_pfd("pll2_pfd2", "pll2_bus", PFD_PLL2_BASE, 1);
+	clk[VF610_CLK_PLL2_PFD3] = imx_clk_pfd("pll2_pfd3", "pll2_bus", PFD_PLL2_BASE, 2);
+	clk[VF610_CLK_PLL2_PFD4] = imx_clk_pfd("pll2_pfd4", "pll2_bus", PFD_PLL2_BASE, 3);
+
+	clk[VF610_CLK_PLL3_PFD1] = imx_clk_pfd("pll3_pfd1", "pll3_usb_otg", PFD_PLL3_BASE, 0);
+	clk[VF610_CLK_PLL3_PFD2] = imx_clk_pfd("pll3_pfd2", "pll3_usb_otg", PFD_PLL3_BASE, 1);
+	clk[VF610_CLK_PLL3_PFD3] = imx_clk_pfd("pll3_pfd3", "pll3_usb_otg", PFD_PLL3_BASE, 2);
+	clk[VF610_CLK_PLL3_PFD4] = imx_clk_pfd("pll3_pfd4", "pll3_usb_otg", PFD_PLL3_BASE, 3);
 
 	clk[VF610_CLK_PLL1_PFD_SEL] = imx_clk_mux("pll1_pfd_sel", CCM_CCSR, 16, 3, pll1_sels, 5);
 	clk[VF610_CLK_PLL2_PFD_SEL] = imx_clk_mux("pll2_pfd_sel", CCM_CCSR, 19, 3, pll2_sels, 5);
@@ -167,12 +217,12 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
 	clk[VF610_CLK_PLATFORM_BUS] = imx_clk_divider("platform_bus", "sys_bus", CCM_CACRR, 3, 3);
 	clk[VF610_CLK_IPG_BUS] = imx_clk_divider("ipg_bus", "platform_bus", CCM_CACRR, 11, 2);
 
-	clk[VF610_CLK_PLL3_MAIN_DIV] = imx_clk_divider("pll3_main_div", "pll3_main", CCM_CACRR, 20, 1);
-	clk[VF610_CLK_PLL4_MAIN_DIV] = clk_register_divider_table(NULL, "pll4_main_div", "pll4_main", 0, CCM_CACRR, 6, 3, 0, pll4_main_div_table, &imx_ccm_lock);
-	clk[VF610_CLK_PLL6_MAIN_DIV] = imx_clk_divider("pll6_main_div", "pll6_main", CCM_CACRR, 21, 1);
+	clk[VF610_CLK_PLL3_MAIN_DIV] = imx_clk_divider("pll3_usb_otg_div", "pll3_usb_otg", CCM_CACRR, 20, 1);
+	clk[VF610_CLK_PLL4_MAIN_DIV] = clk_register_divider_table(NULL, "pll4_audio_div", "pll4_audio", 0, CCM_CACRR, 6, 3, 0, pll4_audio_div_table, &imx_ccm_lock);
+	clk[VF610_CLK_PLL6_MAIN_DIV] = imx_clk_divider("pll6_video_div", "pll6_video", CCM_CACRR, 21, 1);
 
-	clk[VF610_CLK_USBPHY0] = imx_clk_gate("usbphy0", "pll3_main", PLL3_CTRL, 6);
-	clk[VF610_CLK_USBPHY1] = imx_clk_gate("usbphy1", "pll7_main", PLL7_CTRL, 6);
+	clk[VF610_CLK_USBPHY0] = imx_clk_gate("usbphy0", "pll3_usb_otg", PLL3_CTRL, 6);
+	clk[VF610_CLK_USBPHY1] = imx_clk_gate("usbphy1", "pll7_usb_host", PLL7_CTRL, 6);
 
 	clk[VF610_CLK_USBC0] = imx_clk_gate2("usbc0", "ipg_bus", CCM_CCGR1, CCM_CCGRx_CGn(4));
 	clk[VF610_CLK_USBC1] = imx_clk_gate2("usbc1", "ipg_bus", CCM_CCGR7, CCM_CCGRx_CGn(4));
@@ -191,8 +241,8 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
 	clk[VF610_CLK_QSPI1_X1_DIV] = imx_clk_divider("qspi1_x1", "qspi1_x2", CCM_CSCDR3, 11, 1);
 	clk[VF610_CLK_QSPI1] = imx_clk_gate2("qspi1", "qspi1_x1", CCM_CCGR8, CCM_CCGRx_CGn(4));
 
-	clk[VF610_CLK_ENET_50M] = imx_clk_fixed_factor("enet_50m", "pll5_main", 1, 10);
-	clk[VF610_CLK_ENET_25M] = imx_clk_fixed_factor("enet_25m", "pll5_main", 1, 20);
+	clk[VF610_CLK_ENET_50M] = imx_clk_fixed_factor("enet_50m", "pll5_enet", 1, 10);
+	clk[VF610_CLK_ENET_25M] = imx_clk_fixed_factor("enet_25m", "pll5_enet", 1, 20);
 	clk[VF610_CLK_ENET_SEL] = imx_clk_mux("enet_sel", CCM_CSCMR2, 4, 2, rmii_sels, 4);
 	clk[VF610_CLK_ENET_TS_SEL] = imx_clk_mux("enet_ts_sel", CCM_CSCMR2, 0, 3, enet_ts_sels, 7);
 	clk[VF610_CLK_ENET] = imx_clk_gate("enet", "enet_sel", CCM_CSCDR1, 24);
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index c95346c94829..cec9d6c6442c 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -252,9 +252,6 @@ static void __init nokia_n900_legacy_init(void)
 		platform_device_register(&omap3_rom_rng_device);
 
 	}
-
-	/* Only on some development boards */
-	gpio_request_one(164, GPIOF_OUT_INIT_LOW, "smc91x reset");
 }
 
 static void __init omap3_tao3530_legacy_init(void)
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 55f9d6e0cc88..5e65ca8dea62 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -956,7 +956,7 @@ static u32 cache_id_part_number_from_dt;
  * @associativity: variable to return the calculated associativity in
  * @max_way_size: the maximum size in bytes for the cache ways
  */
-static void __init l2x0_cache_size_of_parse(const struct device_node *np,
+static int __init l2x0_cache_size_of_parse(const struct device_node *np,
 					    u32 *aux_val, u32 *aux_mask,
 					    u32 *associativity,
 					    u32 max_way_size)
@@ -974,7 +974,7 @@ static void __init l2x0_cache_size_of_parse(const struct device_node *np,
 	of_property_read_u32(np, "cache-line-size", &line_size);
 
 	if (!cache_size || !sets)
-		return;
+		return -ENODEV;
 
 	/* All these l2 caches have the same line = block size actually */
 	if (!line_size) {
@@ -1009,7 +1009,7 @@ static void __init l2x0_cache_size_of_parse(const struct device_node *np,
 
 	if (way_size > max_way_size) {
 		pr_err("L2C OF: set size %dKB is too large\n", way_size);
-		return;
+		return -EINVAL;
 	}
 
 	pr_info("L2C OF: override cache size: %d bytes (%dKB)\n",
@@ -1027,7 +1027,7 @@ static void __init l2x0_cache_size_of_parse(const struct device_node *np,
 	if (way_size_bits < 1 || way_size_bits > 6) {
 		pr_err("L2C OF: cache way size illegal: %dKB is not mapped\n",
 		       way_size);
-		return;
+		return -EINVAL;
 	}
 
 	mask |= L2C_AUX_CTRL_WAY_SIZE_MASK;
@@ -1036,6 +1036,8 @@ static void __init l2x0_cache_size_of_parse(const struct device_node *np,
 	*aux_val &= ~mask;
 	*aux_val |= val;
 	*aux_mask &= ~mask;
+
+	return 0;
 }
 
 static void __init l2x0_of_parse(const struct device_node *np,
1041static void __init l2x0_of_parse(const struct device_node *np, 1043static void __init l2x0_of_parse(const struct device_node *np,
@@ -1046,6 +1048,7 @@ static void __init l2x0_of_parse(const struct device_node *np,
 	u32 dirty = 0;
 	u32 val = 0, mask = 0;
 	u32 assoc;
+	int ret;
 
 	of_property_read_u32(np, "arm,tag-latency", &tag);
 	if (tag) {
@@ -1068,7 +1071,10 @@ static void __init l2x0_of_parse(const struct device_node *np,
 		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
 	}
 
-	l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K);
+	ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K);
+	if (ret)
+		return;
+
 	if (assoc > 8) {
 		pr_err("l2x0 of: cache setting yield too high associativity\n");
 		pr_err("l2x0 of: %d calculated, max 8\n", assoc);
@@ -1125,6 +1131,7 @@ static void __init l2c310_of_parse(const struct device_node *np,
 	u32 tag[3] = { 0, 0, 0 };
 	u32 filter[2] = { 0, 0 };
 	u32 assoc;
+	int ret;
 
 	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
 	if (tag[0] && tag[1] && tag[2])
@@ -1152,7 +1159,10 @@ static void __init l2c310_of_parse(const struct device_node *np,
 			       l2x0_base + L310_ADDR_FILTER_START);
 	}
 
-	l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
+	ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
+	if (ret)
+		return;
+
 	switch (assoc) {
 	case 16:
 		*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
@@ -1164,8 +1174,8 @@ static void __init l2c310_of_parse(const struct device_node *np,
 		*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
 		break;
 	default:
-		pr_err("PL310 OF: cache setting yield illegal associativity\n");
-		pr_err("PL310 OF: %d calculated, only 8 and 16 legal\n", assoc);
+		pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n",
+		       assoc);
 		break;
 	}
 }
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c245d903927f..e8907117861e 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1198,7 +1198,6 @@ __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
 {
 	return dma_common_pages_remap(pages, size,
 			VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller);
-	return NULL;
 }
 
 /*
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 45aeaaca9052..e17ed00828d7 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -127,8 +127,11 @@ void *kmap_atomic_pfn(unsigned long pfn)
 {
 	unsigned long vaddr;
 	int idx, type;
+	struct page *page = pfn_to_page(pfn);
 
 	pagefault_disable();
+	if (!PageHighMem(page))
+		return page_address(page);
 
 	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR * smp_processor_id();
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 92bba32d9230..9481f85c56e6 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -559,10 +559,10 @@ void __init mem_init(void)
 #ifdef CONFIG_MODULES
 		" modules : 0x%08lx - 0x%08lx (%4ld MB)\n"
 #endif
-		" .text : 0x%p" " - 0x%p" " (%4d kB)\n"
-		" .init : 0x%p" " - 0x%p" " (%4d kB)\n"
-		" .data : 0x%p" " - 0x%p" " (%4d kB)\n"
-		" .bss : 0x%p" " - 0x%p" " (%4d kB)\n",
+		" .text : 0x%p" " - 0x%p" " (%4td kB)\n"
+		" .init : 0x%p" " - 0x%p" " (%4td kB)\n"
+		" .data : 0x%p" " - 0x%p" " (%4td kB)\n"
+		" .bss : 0x%p" " - 0x%p" " (%4td kB)\n",
 
 		MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
 			(PAGE_SIZE)),
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 6feded3b0c4c..a7736fa0580c 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -129,6 +129,10 @@ endmenu
 
 menu "Kernel features"
 
+config NR_CPUS
+	int
+	default "1"
+
 config ADVANCED_OPTIONS
 	bool "Prompt for advanced kernel configuration options"
 	help
diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h
index ea4b233647c1..0a53362d5548 100644
--- a/arch/microblaze/include/asm/unistd.h
+++ b/arch/microblaze/include/asm/unistd.h
@@ -38,6 +38,6 @@
 
 #endif /* __ASSEMBLY__ */
 
-#define __NR_syscalls 387
+#define __NR_syscalls 388
 
 #endif /* _ASM_MICROBLAZE_UNISTD_H */
diff --git a/arch/microblaze/include/uapi/asm/unistd.h b/arch/microblaze/include/uapi/asm/unistd.h
index 1c2380bf8fe6..c712677f8a2a 100644
--- a/arch/microblaze/include/uapi/asm/unistd.h
+++ b/arch/microblaze/include/uapi/asm/unistd.h
@@ -402,5 +402,6 @@
 #define __NR_seccomp 384
 #define __NR_getrandom 385
 #define __NR_memfd_create 386
+#define __NR_bpf 387
 
 #endif /* _UAPI_ASM_MICROBLAZE_UNISTD_H */
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index de59ee1d7010..0166e890486c 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -387,3 +387,4 @@ ENTRY(sys_call_table)
 	.long sys_seccomp
 	.long sys_getrandom /* 385 */
 	.long sys_memfd_create
+	.long sys_bpf
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index 9037914f6985..b30e41c0c033 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -660,8 +660,13 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
 			res = &hose->mem_resources[memno++];
 			break;
 		}
-		if (res != NULL)
-			of_pci_range_to_resource(&range, dev, res);
+		if (res != NULL) {
+			res->name = dev->full_name;
+			res->flags = range.flags;
+			res->start = range.cpu_addr;
+			res->end = range.cpu_addr + range.size - 1;
+			res->parent = res->child = res->sibling = NULL;
+		}
 	}
 
 	/* If there's an ISA hole and the pci_mem_offset is -not- matching
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 623f2971ce0e..766b77d527ac 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -71,7 +71,7 @@ pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
 
 void flush_dcache_icache_hugepage(struct page *page);
 
-#if defined(CONFIG_PPC_MM_SLICES) || defined(CONFIG_PPC_SUBPAGE_PROT)
+#if defined(CONFIG_PPC_MM_SLICES)
 int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
 			   unsigned long len);
 #else
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 7d8a60068805..ce9577d693be 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -365,3 +365,4 @@ SYSCALL_SPU(renameat2)
 SYSCALL_SPU(seccomp)
 SYSCALL_SPU(getrandom)
 SYSCALL_SPU(memfd_create)
+SYSCALL_SPU(bpf)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 4e9af3fd43e7..e0da021caa00 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define __NR_syscalls 361
+#define __NR_syscalls 362
 
 #define __NR__exit __NR_exit
 #define NR_syscalls __NR_syscalls
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 0688fc06e183..f55351f2e66e 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -383,5 +383,6 @@
 #define __NR_seccomp 358
 #define __NR_getrandom 359
 #define __NR_memfd_create 360
+#define __NR_bpf 361
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index 0f9939e693df..5a236f082c78 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -99,8 +99,6 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
 	u64 vsid;
 	int psize, ssize;
 
-	slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
-
 	switch (REGION_ID(ea)) {
 	case USER_REGION_ID:
 		pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea);
@@ -133,6 +131,7 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
 	vsid |= mmu_psize_defs[psize].sllp |
 		((ssize == MMU_SEGSIZE_1T) ? SLB_VSID_B_1T : 0);
 
+	slb->esid = (ea & (ssize == MMU_SEGSIZE_1T ? ESID_MASK_1T : ESID_MASK)) | SLB_ESID_V;
 	slb->vsid = vsid;
 
 	return 0;
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index e5236c24dc07..b9d1dfdbe5bb 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1509,11 +1509,14 @@ static int update_cpu_topology(void *data)
 	cpu = smp_processor_id();
 
 	for (update = data; update; update = update->next) {
+		int new_nid = update->new_nid;
 		if (cpu != update->cpu)
 			continue;
 
-		unmap_cpu_from_node(update->cpu);
-		map_cpu_to_node(update->cpu, update->new_nid);
+		unmap_cpu_from_node(cpu);
+		map_cpu_to_node(cpu, new_nid);
+		set_cpu_numa_node(cpu, new_nid);
+		set_cpu_numa_mem(cpu, local_memory_node(new_nid));
 		vdso_getcpu_init();
 	}
 
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 8d7bda94d196..ded0ea1afde4 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -682,6 +682,7 @@ void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
 	slice_convert(mm, mask, psize);
 }
 
+#ifdef CONFIG_HUGETLB_PAGE
 /*
  * is_hugepage_only_range() is used by generic code to verify whether
  * a normal mmap mapping (non hugetlbfs) is valid on a given area.
@@ -726,4 +727,4 @@ int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
 #endif
 	return !slice_check_fit(mask, available);
 }
-
+#endif
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index 6c8710dd90c9..dba34088da28 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -417,11 +417,6 @@ static int h_24x7_event_add(struct perf_event *event, int flags)
 	return 0;
 }
 
-static int h_24x7_event_idx(struct perf_event *event)
-{
-	return 0;
-}
-
 static struct pmu h_24x7_pmu = {
 	.task_ctx_nr = perf_invalid_context,
 
@@ -433,7 +428,6 @@ static struct pmu h_24x7_pmu = {
 	.start = h_24x7_event_start,
 	.stop = h_24x7_event_stop,
 	.read = h_24x7_event_update,
-	.event_idx = h_24x7_event_idx,
 };
 
 static int hv_24x7_init(void)
diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c
index 15fc76c93022..a051fe946c63 100644
--- a/arch/powerpc/perf/hv-gpci.c
+++ b/arch/powerpc/perf/hv-gpci.c
@@ -246,11 +246,6 @@ static int h_gpci_event_init(struct perf_event *event)
 	return 0;
 }
 
-static int h_gpci_event_idx(struct perf_event *event)
-{
-	return 0;
-}
-
 static struct pmu h_gpci_pmu = {
 	.task_ctx_nr = perf_invalid_context,
 
@@ -262,7 +257,6 @@ static struct pmu h_gpci_pmu = {
 	.start = h_gpci_event_start,
 	.stop = h_gpci_event_stop,
 	.read = h_gpci_event_update,
-	.event_idx = h_gpci_event_idx,
 };
 
 static int hv_gpci_init(void)
diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c
index dd2c285ad170..ad4b31df779a 100644
--- a/arch/powerpc/platforms/powernv/opal-lpc.c
+++ b/arch/powerpc/platforms/powernv/opal-lpc.c
@@ -191,7 +191,6 @@ static ssize_t lpc_debug_read(struct file *filp, char __user *ubuf,
 {
 	struct lpc_debugfs_entry *lpc = filp->private_data;
 	u32 data, pos, len, todo;
-	__be32 bedata;
 	int rc;
 
 	if (!access_ok(VERIFY_WRITE, ubuf, count))
@@ -214,10 +213,9 @@ static ssize_t lpc_debug_read(struct file *filp, char __user *ubuf,
 			len = 2;
 		}
 		rc = opal_lpc_read(opal_lpc_chip_id, lpc->lpc_type, pos,
-				   &bedata, len);
+				   &data, len);
 		if (rc)
 			return -ENXIO;
-		data = be32_to_cpu(bedata);
 		switch(len) {
 		case 4:
 			rc = __put_user((u32)data, (u32 __user *)ubuf);
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index e9e2450c1fdd..feb549aa3eea 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -58,7 +58,7 @@ END_FTR_SECTION(0, 1); \
  */
 
 #define OPAL_CALL(name, token)	\
-	_GLOBAL(name);		\
+	_GLOBAL_TOC(name);	\
 	mflr	r0;		\
 	std	r0,16(r1);	\
 	li	r0,token;	\
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 08e761318c17..b878f12a9597 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1411,11 +1411,6 @@ static void cpumsf_pmu_del(struct perf_event *event, int flags)
 	perf_pmu_enable(event->pmu);
 }
 
-static int cpumsf_pmu_event_idx(struct perf_event *event)
-{
-	return event->hw.idx;
-}
-
 CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC, PERF_EVENT_CPUM_SF);
 CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG);
 
@@ -1458,7 +1453,6 @@ static struct pmu cpumf_sampling = {
 	.stop = cpumsf_pmu_stop,
 	.read = cpumsf_pmu_read,
 
-	.event_idx = cpumsf_pmu_event_idx,
 	.attr_groups = cpumsf_pmu_attr_groups,
 };
 
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
index 9139d14b9c53..538c10db3537 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
@@ -118,7 +118,7 @@ static struct plat_sci_port scif0_platform_data = {
 };
 
 static struct resource scif0_resources[] = {
-	DEFINE_RES_MEM(0xfffffe80, 0x100),
+	DEFINE_RES_MEM(0xfffffe80, 0x10),
 	DEFINE_RES_IRQ(evt2irq(0x4e0)),
 };
 
@@ -143,7 +143,7 @@ static struct plat_sci_port scif1_platform_data = {
 };
 
 static struct resource scif1_resources[] = {
-	DEFINE_RES_MEM(0xa4000150, 0x100),
+	DEFINE_RES_MEM(0xa4000150, 0x10),
 	DEFINE_RES_IRQ(evt2irq(0x900)),
 };
 
@@ -169,7 +169,7 @@ static struct plat_sci_port scif2_platform_data = {
 };
 
 static struct resource scif2_resources[] = {
-	DEFINE_RES_MEM(0xa4000140, 0x100),
+	DEFINE_RES_MEM(0xa4000140, 0x10),
 	DEFINE_RES_IRQ(evt2irq(0x880)),
 };
 
diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h
index c842a89b1190..46d83842eddc 100644
--- a/arch/sparc/include/uapi/asm/unistd.h
+++ b/arch/sparc/include/uapi/asm/unistd.h
@@ -414,8 +414,9 @@
 #define __NR_seccomp 346
 #define __NR_getrandom 347
 #define __NR_memfd_create 348
+#define __NR_bpf 349
 
-#define NR_syscalls 349
+#define NR_syscalls 350
 
 /* Bitmask values returned from kern_features system call. */
 #define KERN_FEATURE_MIXED_MODE_STACK 0x00000001
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 6a873c344bc0..ad0cdf497b78 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -86,4 +86,4 @@ sys_call_table:
 /*330*/	.long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
 /*335*/	.long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
 /*340*/	.long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
-/*345*/	.long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create
+/*345*/	.long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index d9151b6490d8..580cde9370c9 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -87,7 +87,7 @@ sys_call_table32:
 /*330*/	.word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
 	.word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
 /*340*/	.word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
-	.word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create
+	.word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
 
 #endif /* CONFIG_COMPAT */
 
@@ -166,4 +166,4 @@ sys_call_table:
 /*330*/	.word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
 	.word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
 /*340*/	.word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
-	.word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create
+	.word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index f2327e88e07c..ded8a6774ac9 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -142,6 +142,10 @@ config INSTRUCTION_DECODER
 	def_bool y
 	depends on KPROBES || PERF_EVENTS || UPROBES
 
+config PERF_EVENTS_INTEL_UNCORE
+	def_bool y
+	depends on PERF_EVENTS && CPU_SUP_INTEL && PCI
+
 config OUTPUT_FORMAT
 	string
 	default "elf32-i386" if X86_32
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 8ffba18395c8..ffe71228fc10 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -157,7 +157,7 @@ ENTRY(ia32_sysenter_target)
 	 * ourselves. To save a few cycles, we can check whether
 	 * NT was set instead of doing an unconditional popfq.
 	 */
-	testl $X86_EFLAGS_NT,EFLAGS(%rsp) /* saved EFLAGS match cpu */
+	testl $X86_EFLAGS_NT,EFLAGS-ARGOFFSET(%rsp)
 	jnz sysenter_fix_flags
 sysenter_flags_fixed:
 
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 7024c12f7bfe..400873450e33 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -105,6 +105,7 @@ static __always_inline bool should_resched(void)
 # ifdef CONFIG_CONTEXT_TRACKING
   extern asmlinkage void ___preempt_schedule_context(void);
 # define __preempt_schedule_context() asm ("call ___preempt_schedule_context")
+  extern asmlinkage void preempt_schedule_context(void);
 # endif
 #endif
 
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index b436fc735aa4..a142e77693e1 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -397,7 +397,7 @@ static int mp_register_gsi(struct device *dev, u32 gsi, int trigger,
 
 	/* Don't set up the ACPI SCI because it's already set up */
 	if (acpi_gbl_FADT.sci_interrupt == gsi)
-		return gsi;
+		return mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC);
 
 	trigger = trigger == ACPI_EDGE_SENSITIVE ? 0 : 1;
 	polarity = polarity == ACPI_ACTIVE_HIGH ? 0 : 1;
@@ -604,14 +604,18 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
 
 int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
 {
-	int irq = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC | IOAPIC_MAP_CHECK);
+	int irq;
 
-	if (irq >= 0) {
+	if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
+		*irqp = gsi;
+	} else {
+		irq = mp_map_gsi_to_irq(gsi,
+					IOAPIC_MAP_ALLOC | IOAPIC_MAP_CHECK);
+		if (irq < 0)
+			return -1;
 		*irqp = irq;
-		return 0;
 	}
-
-	return -1;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
 
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 5972b108f15a..b708738d016e 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -185,8 +185,6 @@ static void apbt_setup_irq(struct apbt_dev *adev)
 
 	irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT);
 	irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
-	/* APB timer irqs are set up as mp_irqs, timer is edge type */
-	__irq_set_handler(adev->irq, handle_edge_irq, 0, "edge");
 }
 
 /* Should be called with per cpu */
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 00853b254ab0..ba6cc041edb1 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1297,7 +1297,7 @@ void setup_local_APIC(void)
 	unsigned int value, queued;
 	int i, j, acked = 0;
 	unsigned long long tsc = 0, ntsc;
-	long long max_loops = cpu_khz;
+	long long max_loops = cpu_khz ? cpu_khz : 1000000;
 
 	if (cpu_has_tsc)
 		rdtscll(tsc);
@@ -1383,7 +1383,7 @@ void setup_local_APIC(void)
 			break;
 		}
 		if (queued) {
-			if (cpu_has_tsc) {
+			if (cpu_has_tsc && cpu_khz) {
 				rdtscll(ntsc);
 				max_loops = (cpu_khz << 10) - (ntsc - tsc);
 			} else
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 01d5453b5502..e27b49d7c922 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -39,9 +39,12 @@ obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd_iommu.o
39endif 39endif
40obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_knc.o perf_event_p4.o 40obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_knc.o perf_event_p4.o
41obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o 41obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
42obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o perf_event_intel_uncore_snb.o
43obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore_snbep.o perf_event_intel_uncore_nhmex.o
44obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_rapl.o 42obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_rapl.o
43
44obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += perf_event_intel_uncore.o \
45 perf_event_intel_uncore_snb.o \
46 perf_event_intel_uncore_snbep.o \
47 perf_event_intel_uncore_nhmex.o
45endif 48endif
46 49
47 50
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 1ef456273172..9cc6b6f25f42 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -213,12 +213,13 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
213{ 213{
214#ifdef CONFIG_X86_F00F_BUG 214#ifdef CONFIG_X86_F00F_BUG
215 /* 215 /*
216 * All current models of Pentium and Pentium with MMX technology CPUs 216 * All models of Pentium and Pentium with MMX technology CPUs
217 * have the F0 0F bug, which lets nonprivileged users lock up the 217 * have the F0 0F bug, which lets nonprivileged users lock up the
218 * system. Announce that the fault handler will be checking for it. 218 * system. Announce that the fault handler will be checking for it.
219 * The Quark is also family 5, but does not have the same bug.
219 */ 220 */
220 clear_cpu_bug(c, X86_BUG_F00F); 221 clear_cpu_bug(c, X86_BUG_F00F);
221 if (!paravirt_enabled() && c->x86 == 5) { 222 if (!paravirt_enabled() && c->x86 == 5 && c->x86_model < 9) {
222 static int f00f_workaround_enabled; 223 static int f00f_workaround_enabled;
223 224
224 set_cpu_bug(c, X86_BUG_F00F); 225 set_cpu_bug(c, X86_BUG_F00F);
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 1b8299dd3d91..143e5f5dc855 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -243,8 +243,9 @@ static bool check_hw_exists(void)
243 243
244msr_fail: 244msr_fail:
245 printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n"); 245 printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n");
246 printk(boot_cpu_has(X86_FEATURE_HYPERVISOR) ? KERN_INFO : KERN_ERR 246 printk("%sFailed to access perfctr msr (MSR %x is %Lx)\n",
247 "Failed to access perfctr msr (MSR %x is %Lx)\n", reg, val_new); 247 boot_cpu_has(X86_FEATURE_HYPERVISOR) ? KERN_INFO : KERN_ERR,
248 reg, val_new);
248 249
249 return false; 250 return false;
250} 251}
@@ -444,12 +445,6 @@ int x86_pmu_hw_config(struct perf_event *event)
444 if (event->attr.type == PERF_TYPE_RAW) 445 if (event->attr.type == PERF_TYPE_RAW)
445 event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK; 446 event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
446 447
447 if (event->attr.sample_period && x86_pmu.limit_period) {
448 if (x86_pmu.limit_period(event, event->attr.sample_period) >
449 event->attr.sample_period)
450 return -EINVAL;
451 }
452
453 return x86_setup_perfctr(event); 448 return x86_setup_perfctr(event);
454} 449}
455 450
@@ -987,9 +982,6 @@ int x86_perf_event_set_period(struct perf_event *event)
987 if (left > x86_pmu.max_period) 982 if (left > x86_pmu.max_period)
988 left = x86_pmu.max_period; 983 left = x86_pmu.max_period;
989 984
990 if (x86_pmu.limit_period)
991 left = x86_pmu.limit_period(event, left);
992
993 per_cpu(pmc_prev_left[idx], smp_processor_id()) = left; 985 per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
994 986
995 /* 987 /*
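
The check_hw_exists() hunk above fixes a printk() pitfall: adjacent string literals are concatenated before the ?: operator is evaluated, so the old form bound KERN_ERR to the message text and, on hypervisors, passed a bare KERN_INFO as the entire format string, dropping the message and its arguments. A small standalone sketch of the parsing difference; LEVEL_INFO/LEVEL_ERR are stand-ins for the kernel's log-level prefixes.

    #include <stdio.h>

    #define LEVEL_INFO "<6>"
    #define LEVEL_ERR  "<3>"

    int main(void)
    {
        int on_hypervisor = 1;

        /* Broken: parses as  on_hypervisor ? LEVEL_INFO : (LEVEL_ERR "MSR ...") */
        printf("broken: [%s]\n",
               on_hypervisor ? LEVEL_INFO : LEVEL_ERR "MSR 0x%x is 0x%llx\n");

        /* Fixed: pass the level as an ordinary %s argument */
        printf("fixed:  [%s" "MSR 0x%x is 0x%llx]\n",
               on_hypervisor ? LEVEL_INFO : LEVEL_ERR, 0xc1u, 0ULL);

        return 0;
    }

Passing the level through %s keeps a single format string, so the message and its arguments survive regardless of which level the ternary selects.
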
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index d98a34d435d7..fc5eb390b368 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -445,7 +445,6 @@ struct x86_pmu {
445 struct x86_pmu_quirk *quirks; 445 struct x86_pmu_quirk *quirks;
446 int perfctr_second_write; 446 int perfctr_second_write;
447 bool late_ack; 447 bool late_ack;
448 unsigned (*limit_period)(struct perf_event *event, unsigned l);
449 448
450 /* 449 /*
451 * sysfs attrs 450 * sysfs attrs
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index a73947c53b65..944bf019b74f 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -220,15 +220,6 @@ static struct event_constraint intel_hsw_event_constraints[] = {
220 EVENT_CONSTRAINT_END 220 EVENT_CONSTRAINT_END
221}; 221};
222 222
223static struct event_constraint intel_bdw_event_constraints[] = {
224 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
225 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
226 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
227 INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
228 INTEL_EVENT_CONSTRAINT(0xa3, 0x4), /* CYCLE_ACTIVITY.* */
229 EVENT_CONSTRAINT_END
230};
231
232static u64 intel_pmu_event_map(int hw_event) 223static u64 intel_pmu_event_map(int hw_event)
233{ 224{
234 return intel_perfmon_event_map[hw_event]; 225 return intel_perfmon_event_map[hw_event];
@@ -424,126 +415,6 @@ static __initconst const u64 snb_hw_cache_event_ids
424 415
425}; 416};
426 417
427static __initconst const u64 hsw_hw_cache_event_ids
428 [PERF_COUNT_HW_CACHE_MAX]
429 [PERF_COUNT_HW_CACHE_OP_MAX]
430 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
431{
432 [ C(L1D ) ] = {
433 [ C(OP_READ) ] = {
434 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
435 [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
436 },
437 [ C(OP_WRITE) ] = {
438 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
439 [ C(RESULT_MISS) ] = 0x0,
440 },
441 [ C(OP_PREFETCH) ] = {
442 [ C(RESULT_ACCESS) ] = 0x0,
443 [ C(RESULT_MISS) ] = 0x0,
444 },
445 },
446 [ C(L1I ) ] = {
447 [ C(OP_READ) ] = {
448 [ C(RESULT_ACCESS) ] = 0x0,
449 [ C(RESULT_MISS) ] = 0x280, /* ICACHE.MISSES */
450 },
451 [ C(OP_WRITE) ] = {
452 [ C(RESULT_ACCESS) ] = -1,
453 [ C(RESULT_MISS) ] = -1,
454 },
455 [ C(OP_PREFETCH) ] = {
456 [ C(RESULT_ACCESS) ] = 0x0,
457 [ C(RESULT_MISS) ] = 0x0,
458 },
459 },
460 [ C(LL ) ] = {
461 [ C(OP_READ) ] = {
462 /* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD */
463 [ C(RESULT_ACCESS) ] = 0x1b7,
464 /* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD|SUPPLIER_NONE|
465 L3_MISS|ANY_SNOOP */
466 [ C(RESULT_MISS) ] = 0x1b7,
467 },
468 [ C(OP_WRITE) ] = {
469 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE:ALL_RFO */
470 /* OFFCORE_RESPONSE:ALL_RFO|SUPPLIER_NONE|L3_MISS|ANY_SNOOP */
471 [ C(RESULT_MISS) ] = 0x1b7,
472 },
473 [ C(OP_PREFETCH) ] = {
474 [ C(RESULT_ACCESS) ] = 0x0,
475 [ C(RESULT_MISS) ] = 0x0,
476 },
477 },
478 [ C(DTLB) ] = {
479 [ C(OP_READ) ] = {
480 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
481 [ C(RESULT_MISS) ] = 0x108, /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
482 },
483 [ C(OP_WRITE) ] = {
484 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
485 [ C(RESULT_MISS) ] = 0x149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
486 },
487 [ C(OP_PREFETCH) ] = {
488 [ C(RESULT_ACCESS) ] = 0x0,
489 [ C(RESULT_MISS) ] = 0x0,
490 },
491 },
492 [ C(ITLB) ] = {
493 [ C(OP_READ) ] = {
494 [ C(RESULT_ACCESS) ] = 0x6085, /* ITLB_MISSES.STLB_HIT */
495 [ C(RESULT_MISS) ] = 0x185, /* ITLB_MISSES.MISS_CAUSES_A_WALK */
496 },
497 [ C(OP_WRITE) ] = {
498 [ C(RESULT_ACCESS) ] = -1,
499 [ C(RESULT_MISS) ] = -1,
500 },
501 [ C(OP_PREFETCH) ] = {
502 [ C(RESULT_ACCESS) ] = -1,
503 [ C(RESULT_MISS) ] = -1,
504 },
505 },
506 [ C(BPU ) ] = {
507 [ C(OP_READ) ] = {
508 [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
509 [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
510 },
511 [ C(OP_WRITE) ] = {
512 [ C(RESULT_ACCESS) ] = -1,
513 [ C(RESULT_MISS) ] = -1,
514 },
515 [ C(OP_PREFETCH) ] = {
516 [ C(RESULT_ACCESS) ] = -1,
517 [ C(RESULT_MISS) ] = -1,
518 },
519 },
520};
521
522static __initconst const u64 hsw_hw_cache_extra_regs
523 [PERF_COUNT_HW_CACHE_MAX]
524 [PERF_COUNT_HW_CACHE_OP_MAX]
525 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
526{
527 [ C(LL ) ] = {
528 [ C(OP_READ) ] = {
529 /* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD */
530 [ C(RESULT_ACCESS) ] = 0x2d5,
531 /* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD|SUPPLIER_NONE|
532 L3_MISS|ANY_SNOOP */
533 [ C(RESULT_MISS) ] = 0x3fbc0202d5ull,
534 },
535 [ C(OP_WRITE) ] = {
536 [ C(RESULT_ACCESS) ] = 0x122, /* OFFCORE_RESPONSE:ALL_RFO */
537 /* OFFCORE_RESPONSE:ALL_RFO|SUPPLIER_NONE|L3_MISS|ANY_SNOOP */
538 [ C(RESULT_MISS) ] = 0x3fbc020122ull,
539 },
540 [ C(OP_PREFETCH) ] = {
541 [ C(RESULT_ACCESS) ] = 0x0,
542 [ C(RESULT_MISS) ] = 0x0,
543 },
544 },
545};
546
547static __initconst const u64 westmere_hw_cache_event_ids 418static __initconst const u64 westmere_hw_cache_event_ids
548 [PERF_COUNT_HW_CACHE_MAX] 419 [PERF_COUNT_HW_CACHE_MAX]
549 [PERF_COUNT_HW_CACHE_OP_MAX] 420 [PERF_COUNT_HW_CACHE_OP_MAX]
@@ -2034,24 +1905,6 @@ hsw_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
2034 return c; 1905 return c;
2035} 1906}
2036 1907
2037/*
2038 * Broadwell:
2039 * The INST_RETIRED.ALL period always needs to have lowest
2040 * 6bits cleared (BDM57). It shall not use a period smaller
2041 * than 100 (BDM11). We combine the two to enforce
2042 * a min-period of 128.
2043 */
2044static unsigned bdw_limit_period(struct perf_event *event, unsigned left)
2045{
2046 if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
2047 X86_CONFIG(.event=0xc0, .umask=0x01)) {
2048 if (left < 128)
2049 left = 128;
2050 left &= ~0x3fu;
2051 }
2052 return left;
2053}
2054
2055PMU_FORMAT_ATTR(event, "config:0-7" ); 1908PMU_FORMAT_ATTR(event, "config:0-7" );
2056PMU_FORMAT_ATTR(umask, "config:8-15" ); 1909PMU_FORMAT_ATTR(umask, "config:8-15" );
2057PMU_FORMAT_ATTR(edge, "config:18" ); 1910PMU_FORMAT_ATTR(edge, "config:18" );
@@ -2692,8 +2545,8 @@ __init int intel_pmu_init(void)
2692 case 69: /* 22nm Haswell ULT */ 2545 case 69: /* 22nm Haswell ULT */
2693 case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */ 2546 case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
2694 x86_pmu.late_ack = true; 2547 x86_pmu.late_ack = true;
2695 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 2548 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids));
2696 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); 2549 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
2697 2550
2698 intel_pmu_lbr_init_snb(); 2551 intel_pmu_lbr_init_snb();
2699 2552
@@ -2712,28 +2565,6 @@ __init int intel_pmu_init(void)
2712 pr_cont("Haswell events, "); 2565 pr_cont("Haswell events, ");
2713 break; 2566 break;
2714 2567
2715 case 61: /* 14nm Broadwell Core-M */
2716 x86_pmu.late_ack = true;
2717 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
2718 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
2719
2720 intel_pmu_lbr_init_snb();
2721
2722 x86_pmu.event_constraints = intel_bdw_event_constraints;
2723 x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
2724 x86_pmu.extra_regs = intel_snbep_extra_regs;
2725 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
2726 /* all extra regs are per-cpu when HT is on */
2727 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2728 x86_pmu.er_flags |= ERF_NO_HT_SHARING;
2729
2730 x86_pmu.hw_config = hsw_hw_config;
2731 x86_pmu.get_event_constraints = hsw_get_event_constraints;
2732 x86_pmu.cpu_events = hsw_events_attrs;
2733 x86_pmu.limit_period = bdw_limit_period;
2734 pr_cont("Broadwell events, ");
2735 break;
2736
2737 default: 2568 default:
2738 switch (x86_pmu.version) { 2569 switch (x86_pmu.version) {
2739 case 1: 2570 case 1:
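
The perf_event_intel.c hunks above revert the initial Broadwell support, including the bdw_limit_period() helper whose comment carries the reasoning: the INST_RETIRED.ALL period must have its low 6 bits clear (BDM57) and must not be smaller than about 100 (BDM11), so the helper rounded every period down to a multiple of 64 with a floor of 128. A standalone sketch of that clamping, mirroring the removed code:

    #include <stdio.h>

    static unsigned int bdw_clamp_period(unsigned int left)
    {
        if (left < 128)
            left = 128;
        left &= ~0x3fu;             /* clear the low 6 bits: multiple of 64 */
        return left;
    }

    int main(void)
    {
        unsigned int samples[] = { 1, 100, 128, 130, 1000, 4096 };

        for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
            printf("%u -> %u\n", samples[i], bdw_clamp_period(samples[i]));
        return 0;
    }
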
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index b553ed89e5f5..344b63f18d14 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -447,15 +447,14 @@ sysenter_exit:
447sysenter_audit: 447sysenter_audit:
448 testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp) 448 testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
449 jnz syscall_trace_entry 449 jnz syscall_trace_entry
450 addl $4,%esp 450 /* movl PT_EAX(%esp), %eax already set, syscall number: 1st arg to audit */
451 CFI_ADJUST_CFA_OFFSET -4 451 movl PT_EBX(%esp), %edx /* ebx/a0: 2nd arg to audit */
452 movl %esi,4(%esp) /* 5th arg: 4th syscall arg */ 452 /* movl PT_ECX(%esp), %ecx already set, a1: 3rd arg to audit */
453 movl %edx,(%esp) /* 4th arg: 3rd syscall arg */ 453 pushl_cfi PT_ESI(%esp) /* a3: 5th arg */
454 /* %ecx already in %ecx 3rd arg: 2nd syscall arg */ 454 pushl_cfi PT_EDX+4(%esp) /* a2: 4th arg */
455 movl %ebx,%edx /* 2nd arg: 1st syscall arg */
456 /* %eax already in %eax 1st arg: syscall number */
457 call __audit_syscall_entry 455 call __audit_syscall_entry
458 pushl_cfi %ebx 456 popl_cfi %ecx /* get that remapped edx off the stack */
457 popl_cfi %ecx /* get that remapped esi off the stack */
459 movl PT_EAX(%esp),%eax /* reload syscall number */ 458 movl PT_EAX(%esp),%eax /* reload syscall number */
460 jmp sysenter_do_call 459 jmp sysenter_do_call
461 460

diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index 8af817105e29..e7cc5370cd2f 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -111,8 +111,7 @@ static void make_8259A_irq(unsigned int irq)
111{ 111{
112 disable_irq_nosync(irq); 112 disable_irq_nosync(irq);
113 io_apic_irqs &= ~(1<<irq); 113 io_apic_irqs &= ~(1<<irq);
114 irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq, 114 irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
115 i8259A_chip.name);
116 enable_irq(irq); 115 enable_irq(irq);
117} 116}
118 117
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 44f1ed42fdf2..4de73ee78361 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -70,7 +70,6 @@ int vector_used_by_percpu_irq(unsigned int vector)
70void __init init_ISA_irqs(void) 70void __init init_ISA_irqs(void)
71{ 71{
72 struct irq_chip *chip = legacy_pic->chip; 72 struct irq_chip *chip = legacy_pic->chip;
73 const char *name = chip->name;
74 int i; 73 int i;
75 74
76#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC) 75#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
@@ -79,7 +78,7 @@ void __init init_ISA_irqs(void)
79 legacy_pic->init(0); 78 legacy_pic->init(0);
80 79
81 for (i = 0; i < nr_legacy_irqs(); i++) 80 for (i = 0; i < nr_legacy_irqs(); i++)
82 irq_set_chip_and_handler_name(i, chip, handle_level_irq, name); 81 irq_set_chip_and_handler(i, chip, handle_level_irq);
83} 82}
84 83
85void __init init_IRQ(void) 84void __init init_IRQ(void)
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 235cfd39e0d7..ab08aa2276fb 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1128,7 +1128,6 @@ void __init setup_arch(char **cmdline_p)
1128 setup_real_mode(); 1128 setup_real_mode();
1129 1129
1130 memblock_set_current_limit(get_max_mapped()); 1130 memblock_set_current_limit(get_max_mapped());
1131 dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT);
1132 1131
1133 /* 1132 /*
1134 * NOTE: On x86-32, only from this point on, fixmaps are ready for use. 1133 * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
@@ -1159,6 +1158,7 @@ void __init setup_arch(char **cmdline_p)
1159 early_acpi_boot_init(); 1158 early_acpi_boot_init();
1160 1159
1161 initmem_init(); 1160 initmem_init();
1161 dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT);
1162 1162
1163 /* 1163 /*
1164 * Reserve memory for crash kernel after SRAT is parsed so that it 1164 * Reserve memory for crash kernel after SRAT is parsed so that it
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 2d5200e56357..4d2128ac70bd 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -102,8 +102,6 @@ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
102DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); 102DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
103EXPORT_PER_CPU_SYMBOL(cpu_info); 103EXPORT_PER_CPU_SYMBOL(cpu_info);
104 104
105static DEFINE_PER_CPU(struct completion, die_complete);
106
107atomic_t init_deasserted; 105atomic_t init_deasserted;
108 106
109/* 107/*
@@ -1318,6 +1316,8 @@ void cpu_disable_common(void)
1318 fixup_irqs(); 1316 fixup_irqs();
1319} 1317}
1320 1318
1319static DEFINE_PER_CPU(struct completion, die_complete);
1320
1321int native_cpu_disable(void) 1321int native_cpu_disable(void)
1322{ 1322{
1323 int ret; 1323 int ret;
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index b6025f9e36c6..b7e50bba3bbb 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -1166,14 +1166,17 @@ void __init tsc_init(void)
1166 1166
1167 x86_init.timers.tsc_pre_init(); 1167 x86_init.timers.tsc_pre_init();
1168 1168
1169 if (!cpu_has_tsc) 1169 if (!cpu_has_tsc) {
1170 setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
1170 return; 1171 return;
1172 }
1171 1173
1172 tsc_khz = x86_platform.calibrate_tsc(); 1174 tsc_khz = x86_platform.calibrate_tsc();
1173 cpu_khz = tsc_khz; 1175 cpu_khz = tsc_khz;
1174 1176
1175 if (!tsc_khz) { 1177 if (!tsc_khz) {
1176 mark_tsc_unstable("could not calculate TSC khz"); 1178 mark_tsc_unstable("could not calculate TSC khz");
1179 setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
1177 return; 1180 return;
1178 } 1181 }
1179 1182
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 749f9fa38254..5edf088ca51e 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -574,12 +574,14 @@ static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
574 case 4: 574 case 4:
575 ctxt->_eip = (u32)dst; 575 ctxt->_eip = (u32)dst;
576 break; 576 break;
577#ifdef CONFIG_X86_64
577 case 8: 578 case 8:
578 if ((cs_l && is_noncanonical_address(dst)) || 579 if ((cs_l && is_noncanonical_address(dst)) ||
579 (!cs_l && (dst & ~(u32)-1))) 580 (!cs_l && (dst >> 32) != 0))
580 return emulate_gp(ctxt, 0); 581 return emulate_gp(ctxt, 0);
581 ctxt->_eip = dst; 582 ctxt->_eip = dst;
582 break; 583 break;
584#endif
583 default: 585 default:
584 WARN(1, "unsupported eip assignment size\n"); 586 WARN(1, "unsupported eip assignment size\n");
585 } 587 }
@@ -641,7 +643,8 @@ static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
641 643
642static int __linearize(struct x86_emulate_ctxt *ctxt, 644static int __linearize(struct x86_emulate_ctxt *ctxt,
643 struct segmented_address addr, 645 struct segmented_address addr,
644 unsigned size, bool write, bool fetch, 646 unsigned *max_size, unsigned size,
647 bool write, bool fetch,
645 ulong *linear) 648 ulong *linear)
646{ 649{
647 struct desc_struct desc; 650 struct desc_struct desc;
@@ -652,10 +655,15 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
652 unsigned cpl; 655 unsigned cpl;
653 656
654 la = seg_base(ctxt, addr.seg) + addr.ea; 657 la = seg_base(ctxt, addr.seg) + addr.ea;
658 *max_size = 0;
655 switch (ctxt->mode) { 659 switch (ctxt->mode) {
656 case X86EMUL_MODE_PROT64: 660 case X86EMUL_MODE_PROT64:
657 if (((signed long)la << 16) >> 16 != la) 661 if (((signed long)la << 16) >> 16 != la)
658 return emulate_gp(ctxt, 0); 662 return emulate_gp(ctxt, 0);
663
664 *max_size = min_t(u64, ~0u, (1ull << 48) - la);
665 if (size > *max_size)
666 goto bad;
659 break; 667 break;
660 default: 668 default:
661 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL, 669 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
@@ -673,20 +681,25 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
673 if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch && 681 if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch &&
674 (ctxt->d & NoBigReal)) { 682 (ctxt->d & NoBigReal)) {
675 /* la is between zero and 0xffff */ 683 /* la is between zero and 0xffff */
676 if (la > 0xffff || (u32)(la + size - 1) > 0xffff) 684 if (la > 0xffff)
677 goto bad; 685 goto bad;
686 *max_size = 0x10000 - la;
678 } else if ((desc.type & 8) || !(desc.type & 4)) { 687 } else if ((desc.type & 8) || !(desc.type & 4)) {
679 /* expand-up segment */ 688 /* expand-up segment */
680 if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim) 689 if (addr.ea > lim)
681 goto bad; 690 goto bad;
691 *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
682 } else { 692 } else {
683 /* expand-down segment */ 693 /* expand-down segment */
684 if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim) 694 if (addr.ea <= lim)
685 goto bad; 695 goto bad;
686 lim = desc.d ? 0xffffffff : 0xffff; 696 lim = desc.d ? 0xffffffff : 0xffff;
687 if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim) 697 if (addr.ea > lim)
688 goto bad; 698 goto bad;
699 *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
689 } 700 }
701 if (size > *max_size)
702 goto bad;
690 cpl = ctxt->ops->cpl(ctxt); 703 cpl = ctxt->ops->cpl(ctxt);
691 if (!(desc.type & 8)) { 704 if (!(desc.type & 8)) {
692 /* data segment */ 705 /* data segment */
@@ -711,9 +724,9 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
711 return X86EMUL_CONTINUE; 724 return X86EMUL_CONTINUE;
712bad: 725bad:
713 if (addr.seg == VCPU_SREG_SS) 726 if (addr.seg == VCPU_SREG_SS)
714 return emulate_ss(ctxt, sel); 727 return emulate_ss(ctxt, 0);
715 else 728 else
716 return emulate_gp(ctxt, sel); 729 return emulate_gp(ctxt, 0);
717} 730}
718 731
719static int linearize(struct x86_emulate_ctxt *ctxt, 732static int linearize(struct x86_emulate_ctxt *ctxt,
@@ -721,7 +734,8 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
721 unsigned size, bool write, 734 unsigned size, bool write,
722 ulong *linear) 735 ulong *linear)
723{ 736{
724 return __linearize(ctxt, addr, size, write, false, linear); 737 unsigned max_size;
738 return __linearize(ctxt, addr, &max_size, size, write, false, linear);
725} 739}
726 740
727 741
@@ -746,17 +760,27 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
746static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size) 760static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
747{ 761{
748 int rc; 762 int rc;
749 unsigned size; 763 unsigned size, max_size;
750 unsigned long linear; 764 unsigned long linear;
751 int cur_size = ctxt->fetch.end - ctxt->fetch.data; 765 int cur_size = ctxt->fetch.end - ctxt->fetch.data;
752 struct segmented_address addr = { .seg = VCPU_SREG_CS, 766 struct segmented_address addr = { .seg = VCPU_SREG_CS,
753 .ea = ctxt->eip + cur_size }; 767 .ea = ctxt->eip + cur_size };
754 768
755 size = 15UL ^ cur_size; 769 /*
756 rc = __linearize(ctxt, addr, size, false, true, &linear); 770 * We do not know exactly how many bytes will be needed, and
771 * __linearize is expensive, so fetch as much as possible. We
772 * just have to avoid going beyond the 15 byte limit, the end
773 * of the segment, or the end of the page.
774 *
775 * __linearize is called with size 0 so that it does not do any
776 * boundary check itself. Instead, we use max_size to check
777 * against op_size.
778 */
779 rc = __linearize(ctxt, addr, &max_size, 0, false, true, &linear);
757 if (unlikely(rc != X86EMUL_CONTINUE)) 780 if (unlikely(rc != X86EMUL_CONTINUE))
758 return rc; 781 return rc;
759 782
783 size = min_t(unsigned, 15UL ^ cur_size, max_size);
760 size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear)); 784 size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
761 785
762 /* 786 /*
@@ -766,7 +790,8 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
766 * still, we must have hit the 15-byte boundary. 790 * still, we must have hit the 15-byte boundary.
767 */ 791 */
768 if (unlikely(size < op_size)) 792 if (unlikely(size < op_size))
769 return X86EMUL_UNHANDLEABLE; 793 return emulate_gp(ctxt, 0);
794
770 rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end, 795 rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
771 size, &ctxt->exception); 796 size, &ctxt->exception);
772 if (unlikely(rc != X86EMUL_CONTINUE)) 797 if (unlikely(rc != X86EMUL_CONTINUE))
@@ -2012,7 +2037,7 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2012 2037
2013 rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l); 2038 rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
2014 if (rc != X86EMUL_CONTINUE) { 2039 if (rc != X86EMUL_CONTINUE) {
2015 WARN_ON(!ctxt->mode != X86EMUL_MODE_PROT64); 2040 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
2016 /* assigning eip failed; restore the old cs */ 2041 /* assigning eip failed; restore the old cs */
2017 ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS); 2042 ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
2018 return rc; 2043 return rc;
@@ -2109,7 +2134,7 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2109 return rc; 2134 return rc;
2110 rc = assign_eip_far(ctxt, eip, new_desc.l); 2135 rc = assign_eip_far(ctxt, eip, new_desc.l);
2111 if (rc != X86EMUL_CONTINUE) { 2136 if (rc != X86EMUL_CONTINUE) {
2112 WARN_ON(!ctxt->mode != X86EMUL_MODE_PROT64); 2137 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
2113 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS); 2138 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
2114 } 2139 }
2115 return rc; 2140 return rc;
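
One detail of the assign_eip_far() hunk above is worth spelling out: with dst being a 64-bit ulong, the old guard '(dst & ~(u32)-1)' can never fire, because (u32)-1 is 0xffffffff and its complement, still evaluated as a 32-bit unsigned value, is 0; '(dst >> 32) != 0' is the test that actually detects bits above bit 31. A standalone demonstration (the u32 typedef and dst value below are local stand-ins):

    #include <stdio.h>
    #include <stdint.h>

    typedef uint32_t u32;

    int main(void)
    {
        uint64_t dst = 0x1234567890ULL;   /* target EIP with bits above bit 31 set */

        printf("~(u32)-1          = %#x\n", ~(u32)-1);                              /* 0 */
        printf("dst & ~(u32)-1    = %#llx\n", (unsigned long long)(dst & ~(u32)-1)); /* 0: old check never triggers */
        printf("(dst >> 32) != 0  = %d\n", (int)((dst >> 32) != 0));                 /* 1: new check catches it */
        return 0;
    }
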
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a8b76c4c95e2..3e556c68351b 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4579,7 +4579,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
4579 vmcs_write32(TPR_THRESHOLD, 0); 4579 vmcs_write32(TPR_THRESHOLD, 0);
4580 } 4580 }
4581 4581
4582 kvm_vcpu_reload_apic_access_page(vcpu); 4582 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
4583 4583
4584 if (vmx_vm_has_apicv(vcpu->kvm)) 4584 if (vmx_vm_has_apicv(vcpu->kvm))
4585 memset(&vmx->pi_desc, 0, sizeof(struct pi_desc)); 4585 memset(&vmx->pi_desc, 0, sizeof(struct pi_desc));
@@ -6426,6 +6426,8 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
6426 const unsigned long *fields = shadow_read_write_fields; 6426 const unsigned long *fields = shadow_read_write_fields;
6427 const int num_fields = max_shadow_read_write_fields; 6427 const int num_fields = max_shadow_read_write_fields;
6428 6428
6429 preempt_disable();
6430
6429 vmcs_load(shadow_vmcs); 6431 vmcs_load(shadow_vmcs);
6430 6432
6431 for (i = 0; i < num_fields; i++) { 6433 for (i = 0; i < num_fields; i++) {
@@ -6449,6 +6451,8 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
6449 6451
6450 vmcs_clear(shadow_vmcs); 6452 vmcs_clear(shadow_vmcs);
6451 vmcs_load(vmx->loaded_vmcs->vmcs); 6453 vmcs_load(vmx->loaded_vmcs->vmcs);
6454
6455 preempt_enable();
6452} 6456}
6453 6457
6454static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx) 6458static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index ae242a7c11c7..36de293caf25 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -409,7 +409,7 @@ phys_addr_t slow_virt_to_phys(void *__virt_addr)
409 psize = page_level_size(level); 409 psize = page_level_size(level);
410 pmask = page_level_mask(level); 410 pmask = page_level_mask(level);
411 offset = virt_addr & ~pmask; 411 offset = virt_addr & ~pmask;
412 phys_addr = pte_pfn(*pte) << PAGE_SHIFT; 412 phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
413 return (phys_addr | offset); 413 return (phys_addr | offset);
414} 414}
415EXPORT_SYMBOL_GPL(slow_virt_to_phys); 415EXPORT_SYMBOL_GPL(slow_virt_to_phys);
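
The pageattr.c change above widens the page frame number before shifting. On 32-bit PAE kernels pte_pfn() returns a 32-bit unsigned long, so 'pfn << PAGE_SHIFT' can wrap for physical addresses at or above 4 GiB, while casting to the 64-bit phys_addr_t first preserves the high bits. A standalone sketch of the difference; pfn and PAGE_SHIFT here are illustrative stand-ins.

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        uint32_t pfn = 0x123456;    /* page at physical 0x123456000, above 4 GiB */
        uint64_t truncated = pfn << PAGE_SHIFT;            /* 32-bit shift, high bits lost */
        uint64_t correct   = (uint64_t)pfn << PAGE_SHIFT;  /* widen first, as in the fix */

        printf("truncated: %#llx\n", (unsigned long long)truncated);  /* 0x23456000 */
        printf("correct:   %#llx\n", (unsigned long long)correct);    /* 0x123456000 */
        return 0;
    }

Casting before the shift is the same pattern the fix applies with phys_addr_t, which is 64 bits wide whenever PAE is enabled.
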
diff --git a/arch/x86/platform/intel-mid/sfi.c b/arch/x86/platform/intel-mid/sfi.c
index 3c53a90fdb18..c14ad34776c4 100644
--- a/arch/x86/platform/intel-mid/sfi.c
+++ b/arch/x86/platform/intel-mid/sfi.c
@@ -106,6 +106,7 @@ int __init sfi_parse_mtmr(struct sfi_table_header *table)
106 mp_irq.dstapic = MP_APIC_ALL; 106 mp_irq.dstapic = MP_APIC_ALL;
107 mp_irq.dstirq = pentry->irq; 107 mp_irq.dstirq = pentry->irq;
108 mp_save_irq(&mp_irq); 108 mp_save_irq(&mp_irq);
109 mp_map_gsi_to_irq(pentry->irq, IOAPIC_MAP_ALLOC);
109 } 110 }
110 111
111 return 0; 112 return 0;
@@ -176,6 +177,7 @@ int __init sfi_parse_mrtc(struct sfi_table_header *table)
176 mp_irq.dstapic = MP_APIC_ALL; 177 mp_irq.dstapic = MP_APIC_ALL;
177 mp_irq.dstirq = pentry->irq; 178 mp_irq.dstirq = pentry->irq;
178 mp_save_irq(&mp_irq); 179 mp_save_irq(&mp_irq);
180 mp_map_gsi_to_irq(pentry->irq, IOAPIC_MAP_ALLOC);
179 } 181 }
180 return 0; 182 return 0;
181} 183}