author     David S. Miller <davem@davemloft.net>   2014-01-06 17:37:45 -0500
committer  David S. Miller <davem@davemloft.net>   2014-01-06 17:37:45 -0500
commit     56a4342dfe3145cd66f766adccb28fd9b571606d (patch)
tree       d1593764488ff8cbb0b83cb9ae35fd968bf81760 /arch
parent     805c1f4aedaba1bc8d839e7c27b128083dd5c2f0 (diff)
parent     fe0d692bbc645786bce1a98439e548ae619269f5 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
ipv6 tunnel statistic bug fixes conflicting with consolidation into
generic sw per-cpu net stats.
qlogic conflict between queue counting bug fix and the addition
of multiple MAC address support.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch')
40 files changed, 305 insertions(+), 163 deletions(-)
diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h
index 68125dd766c6..39e58d1cdf90 100644
--- a/arch/arc/include/uapi/asm/unistd.h
+++ b/arch/arc/include/uapi/asm/unistd.h
@@ -8,7 +8,11 @@

 /******** no-legacy-syscalls-ABI *******/

-#ifndef _UAPI_ASM_ARC_UNISTD_H
+/*
+* Non-typical guard macro to enable inclusion twice in ARCH sys.c
+* That is how the Generic syscall wrapper generator works
+*/
+#if !defined(_UAPI_ASM_ARC_UNISTD_H) || defined(__SYSCALL)
 #define _UAPI_ASM_ARC_UNISTD_H

 #define __ARCH_WANT_SYS_EXECVE
@@ -36,4 +40,6 @@ __SYSCALL(__NR_arc_gettls, sys_arc_gettls)
 #define __NR_sysfs (__NR_arch_specific_syscall + 3)
 __SYSCALL(__NR_sysfs, sys_sysfs)

+#undef __SYSCALL
+
 #endif
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index ee845fad939b..9987dd0e9c59 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -87,9 +87,9 @@
 interrupts = <1 9 0xf04>;
 };

-gpio0: gpio@ffc40000 {
+gpio0: gpio@e6050000 {
 compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-reg = <0 0xffc40000 0 0x2c>;
+reg = <0 0xe6050000 0 0x50>;
 interrupt-parent = <&gic>;
 interrupts = <0 4 0x4>;
 #gpio-cells = <2>;
@@ -99,9 +99,9 @@
 interrupt-controller;
 };

-gpio1: gpio@ffc41000 {
+gpio1: gpio@e6051000 {
 compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-reg = <0 0xffc41000 0 0x2c>;
+reg = <0 0xe6051000 0 0x50>;
 interrupt-parent = <&gic>;
 interrupts = <0 5 0x4>;
 #gpio-cells = <2>;
@@ -111,9 +111,9 @@
 interrupt-controller;
 };

-gpio2: gpio@ffc42000 {
+gpio2: gpio@e6052000 {
 compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-reg = <0 0xffc42000 0 0x2c>;
+reg = <0 0xe6052000 0 0x50>;
 interrupt-parent = <&gic>;
 interrupts = <0 6 0x4>;
 #gpio-cells = <2>;
@@ -123,9 +123,9 @@
 interrupt-controller;
 };

-gpio3: gpio@ffc43000 {
+gpio3: gpio@e6053000 {
 compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-reg = <0 0xffc43000 0 0x2c>;
+reg = <0 0xe6053000 0 0x50>;
 interrupt-parent = <&gic>;
 interrupts = <0 7 0x4>;
 #gpio-cells = <2>;
@@ -135,9 +135,9 @@
 interrupt-controller;
 };

-gpio4: gpio@ffc44000 {
+gpio4: gpio@e6054000 {
 compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-reg = <0 0xffc44000 0 0x2c>;
+reg = <0 0xe6054000 0 0x50>;
 interrupt-parent = <&gic>;
 interrupts = <0 8 0x4>;
 #gpio-cells = <2>;
@@ -147,9 +147,9 @@
 interrupt-controller;
 };

-gpio5: gpio@ffc45000 {
+gpio5: gpio@e6055000 {
 compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-reg = <0 0xffc45000 0 0x2c>;
+reg = <0 0xe6055000 0 0x50>;
 interrupt-parent = <&gic>;
 interrupts = <0 9 0x4>;
 #gpio-cells = <2>;
@@ -241,7 +241,7 @@

 sdhi0: sdhi@ee100000 {
 compatible = "renesas,sdhi-r8a7790";
-reg = <0 0xee100000 0 0x100>;
+reg = <0 0xee100000 0 0x200>;
 interrupt-parent = <&gic>;
 interrupts = <0 165 4>;
 cap-sd-highspeed;
@@ -250,7 +250,7 @@

 sdhi1: sdhi@ee120000 {
 compatible = "renesas,sdhi-r8a7790";
-reg = <0 0xee120000 0 0x100>;
+reg = <0 0xee120000 0 0x200>;
 interrupt-parent = <&gic>;
 interrupts = <0 166 4>;
 cap-sd-highspeed;
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
index 4ec8d82b0492..44a59c3abfb0 100644
--- a/arch/arm/mach-omap2/board-ldp.c
+++ b/arch/arm/mach-omap2/board-ldp.c
@@ -242,12 +242,18 @@ static void __init ldp_display_init(void)

 static int ldp_twl_gpio_setup(struct device *dev, unsigned gpio, unsigned ngpio)
 {
+int res;
+
 /* LCD enable GPIO */
 ldp_lcd_pdata.enable_gpio = gpio + 7;

 /* Backlight enable GPIO */
 ldp_lcd_pdata.backlight_gpio = gpio + 15;

+res = platform_device_register(&ldp_lcd_device);
+if (res)
+pr_err("Unable to register LCD: %d\n", res);
+
 return 0;
 }

@@ -346,7 +352,6 @@ static struct omap2_hsmmc_info mmc[] __initdata = {

 static struct platform_device *ldp_devices[] __initdata = {
 &ldp_gpio_keys_device,
-&ldp_lcd_device,
 };

 #ifdef CONFIG_OMAP_MUX
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index 58347bb874a0..4cf165502b35 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -101,13 +101,51 @@ static const struct omap_dss_hwmod_data omap4_dss_hwmod_data[] __initconst = {
 { "dss_hdmi", "omapdss_hdmi", -1 },
 };

+static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
+{
+u32 enable_mask, enable_shift;
+u32 pipd_mask, pipd_shift;
+u32 reg;
+
+if (dsi_id == 0) {
+enable_mask = OMAP4_DSI1_LANEENABLE_MASK;
+enable_shift = OMAP4_DSI1_LANEENABLE_SHIFT;
+pipd_mask = OMAP4_DSI1_PIPD_MASK;
+pipd_shift = OMAP4_DSI1_PIPD_SHIFT;
+} else if (dsi_id == 1) {
+enable_mask = OMAP4_DSI2_LANEENABLE_MASK;
+enable_shift = OMAP4_DSI2_LANEENABLE_SHIFT;
+pipd_mask = OMAP4_DSI2_PIPD_MASK;
+pipd_shift = OMAP4_DSI2_PIPD_SHIFT;
+} else {
+return -ENODEV;
+}
+
+reg = omap4_ctrl_pad_readl(OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY);
+
+reg &= ~enable_mask;
+reg &= ~pipd_mask;
+
+reg |= (lanes << enable_shift) & enable_mask;
+reg |= (lanes << pipd_shift) & pipd_mask;
+
+omap4_ctrl_pad_writel(reg, OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY);
+
+return 0;
+}
+
 static int omap_dsi_enable_pads(int dsi_id, unsigned lane_mask)
 {
+if (cpu_is_omap44xx())
+return omap4_dsi_mux_pads(dsi_id, lane_mask);
+
 return 0;
 }

 static void omap_dsi_disable_pads(int dsi_id, unsigned lane_mask)
 {
+if (cpu_is_omap44xx())
+omap4_dsi_mux_pads(dsi_id, 0);
 }

 static int omap_dss_set_min_bus_tput(struct device *dev, unsigned long tput)
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
index 56cebb05509e..d23c77fadb31 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
@@ -796,7 +796,7 @@ struct omap_hwmod omap2xxx_counter_32k_hwmod = {

 /* gpmc */
 static struct omap_hwmod_irq_info omap2xxx_gpmc_irqs[] = {
-{ .irq = 20 },
+{ .irq = 20 + OMAP_INTC_START, },
 { .irq = -1 }
 };

@@ -841,7 +841,7 @@ static struct omap_hwmod_class omap2_rng_hwmod_class = {
 };

 static struct omap_hwmod_irq_info omap2_rng_mpu_irqs[] = {
-{ .irq = 52 },
+{ .irq = 52 + OMAP_INTC_START, },
 { .irq = -1 }
 };

diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index d33742908f97..4c3b1e6df508 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -2165,7 +2165,7 @@ static struct omap_hwmod_class omap3xxx_gpmc_hwmod_class = {
 };

 static struct omap_hwmod_irq_info omap3xxx_gpmc_irqs[] = {
-{ .irq = 20 },
+{ .irq = 20 + OMAP_INTC_START, },
 { .irq = -1 }
 };

@@ -2999,7 +2999,7 @@ static struct omap_mmu_dev_attr mmu_isp_dev_attr = {

 static struct omap_hwmod omap3xxx_mmu_isp_hwmod;
 static struct omap_hwmod_irq_info omap3xxx_mmu_isp_irqs[] = {
-{ .irq = 24 },
+{ .irq = 24 + OMAP_INTC_START, },
 { .irq = -1 }
 };

@@ -3041,7 +3041,7 @@ static struct omap_mmu_dev_attr mmu_iva_dev_attr = {

 static struct omap_hwmod omap3xxx_mmu_iva_hwmod;
 static struct omap_hwmod_irq_info omap3xxx_mmu_iva_irqs[] = {
-{ .irq = 28 },
+{ .irq = 28 + OMAP_INTC_START, },
 { .irq = -1 }
 };

diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index db32d5380b11..18f333c440db 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -1637,7 +1637,7 @@ static struct omap_hwmod dra7xx_uart1_hwmod = {
 .class = &dra7xx_uart_hwmod_class,
 .clkdm_name = "l4per_clkdm",
 .main_clk = "uart1_gfclk_mux",
-.flags = HWMOD_SWSUP_SIDLE_ACT,
+.flags = HWMOD_SWSUP_SIDLE_ACT | DEBUG_OMAP2UART1_FLAGS,
 .prcm = {
 .omap4 = {
 .clkctrl_offs = DRA7XX_CM_L4PER_UART1_CLKCTRL_OFFSET,
diff --git a/arch/arm/mach-pxa/include/mach/lubbock.h b/arch/arm/mach-pxa/include/mach/lubbock.h
index 2a086e8373eb..958cd6af9384 100644
--- a/arch/arm/mach-pxa/include/mach/lubbock.h
+++ b/arch/arm/mach-pxa/include/mach/lubbock.h
@@ -10,6 +10,8 @@
 * published by the Free Software Foundation.
 */

+#include <mach/irqs.h>
+
 #define LUBBOCK_ETH_PHYS PXA_CS3_PHYS

 #define LUBBOCK_FPGA_PHYS PXA_CS2_PHYS
diff --git a/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c b/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c
index 7eb9a10fc1af..2fddf38192df 100644
--- a/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c
+++ b/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c
@@ -8,8 +8,6 @@
 * published by the Free Software Foundation.
 */

-#include <linux/clk-provider.h>
-#include <linux/irqchip.h>
 #include <linux/of_platform.h>

 #include <asm/mach/arch.h>
@@ -48,15 +46,9 @@ static void __init s3c64xx_dt_map_io(void)
 panic("SoC is not S3C64xx!");
 }

-static void __init s3c64xx_dt_init_irq(void)
-{
-of_clk_init(NULL);
-samsung_wdt_reset_of_init();
-irqchip_init();
-};
-
 static void __init s3c64xx_dt_init_machine(void)
 {
+samsung_wdt_reset_of_init();
 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }

@@ -79,7 +71,6 @@ DT_MACHINE_START(S3C6400_DT, "Samsung S3C64xx (Flattened Device Tree)")
 /* Maintainer: Tomasz Figa <tomasz.figa@gmail.com> */
 .dt_compat = s3c64xx_dt_compat,
 .map_io = s3c64xx_dt_map_io,
-.init_irq = s3c64xx_dt_init_irq,
 .init_machine = s3c64xx_dt_init_machine,
 .restart = s3c64xx_dt_restart,
 MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index 958e3cbf0ac2..c18689123023 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -614,6 +614,11 @@ static struct regulator_consumer_supply fixed3v3_power_consumers[] = {
 REGULATOR_SUPPLY("vqmmc", "sh_mmcif"),
 };

+/* Fixed 3.3V regulator used by LCD backlight */
+static struct regulator_consumer_supply fixed5v0_power_consumers[] = {
+REGULATOR_SUPPLY("power", "pwm-backlight.0"),
+};
+
 /* Fixed 3.3V regulator to be used by SDHI0 */
 static struct regulator_consumer_supply vcc_sdhi0_consumers[] = {
 REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"),
@@ -1196,6 +1201,8 @@ static void __init eva_init(void)

 regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers,
 ARRAY_SIZE(fixed3v3_power_consumers), 3300000);
+regulator_register_always_on(3, "fixed-5.0V", fixed5v0_power_consumers,
+ARRAY_SIZE(fixed5v0_power_consumers), 5000000);

 pinctrl_register_mappings(eva_pinctrl_map, ARRAY_SIZE(eva_pinctrl_map));
 pwm_add_table(pwm_lookup, ARRAY_SIZE(pwm_lookup));
diff --git a/arch/arm/mach-shmobile/board-bockw.c b/arch/arm/mach-shmobile/board-bockw.c
index 38611526fe9a..3c4995aebd22 100644
--- a/arch/arm/mach-shmobile/board-bockw.c
+++ b/arch/arm/mach-shmobile/board-bockw.c
@@ -679,7 +679,7 @@ static void __init bockw_init(void)
 .id = i,
 .data = &rsnd_card_info[i],
 .size_data = sizeof(struct asoc_simple_card_info),
-.dma_mask = ~0,
+.dma_mask = DMA_BIT_MASK(32),
 };

 platform_device_register_full(&cardinfo);
diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c
index a8d3ce646fb9..e0406fd37390 100644
--- a/arch/arm/mach-shmobile/board-lager.c
+++ b/arch/arm/mach-shmobile/board-lager.c
@@ -245,7 +245,9 @@ static void __init lager_init(void)
 {
 lager_add_standard_devices();

-phy_register_fixup_for_id("r8a7790-ether-ff:01", lager_ksz8041_fixup);
+if (IS_ENABLED(CONFIG_PHYLIB))
+phy_register_fixup_for_id("r8a7790-ether-ff:01",
+lager_ksz8041_fixup);
 }

 static const char * const lager_boards_compat_dt[] __initconst = {
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 83e4f959ee47..85501238b425 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -96,7 +96,7 @@ static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
 struct remap_data *info = data;
 struct page *page = info->pages[info->index++];
 unsigned long pfn = page_to_pfn(page);
-pte_t pte = pfn_pte(pfn, info->prot);
+pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot));

 if (map_foreign_page(pfn, info->fgmfn, info->domid))
 return -EFAULT;
@@ -224,10 +224,10 @@ static int __init xen_guest_init(void)
 }
 if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res))
 return 0;
-xen_hvm_resume_frames = res.start >> PAGE_SHIFT;
+xen_hvm_resume_frames = res.start;
 xen_events_irq = irq_of_parse_and_map(node, 0);
 pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n",
-version, xen_events_irq, xen_hvm_resume_frames);
+version, xen_events_irq, (xen_hvm_resume_frames >> PAGE_SHIFT));
 xen_domain_type = XEN_HVM_DOMAIN;

 xen_setup_features();
diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h
index 2820f1a6eebe..dde3fc9c49f0 100644
--- a/arch/arm64/include/asm/xen/page-coherent.h
+++ b/arch/arm64/include/asm/xen/page-coherent.h
@@ -23,25 +23,21 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
 unsigned long offset, size_t size, enum dma_data_direction dir,
 struct dma_attrs *attrs)
 {
-__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
 }

 static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
 size_t size, enum dma_data_direction dir,
 struct dma_attrs *attrs)
 {
-__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
 }

 static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
 dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
-__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
 }

 static inline void xen_dma_sync_single_for_device(struct device *hwdev,
 dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
-__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
 }
 #endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 6777a2192b83..6a8928bba03c 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -214,31 +214,29 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
 {
 int err, len, type, disabled = !ctrl.enabled;

-if (disabled) {
-len = 0;
-type = HW_BREAKPOINT_EMPTY;
-} else {
+attr->disabled = disabled;
+if (disabled)
+return 0;
+
 err = arch_bp_generic_fields(ctrl, &len, &type);
 if (err)
 return err;

 switch (note_type) {
 case NT_ARM_HW_BREAK:
 if ((type & HW_BREAKPOINT_X) != type)
-return -EINVAL;
-break;
-case NT_ARM_HW_WATCH:
-if ((type & HW_BREAKPOINT_RW) != type)
-return -EINVAL;
-break;
-default:
 return -EINVAL;
-}
+break;
+case NT_ARM_HW_WATCH:
+if ((type & HW_BREAKPOINT_RW) != type)
+return -EINVAL;
+break;
+default:
+return -EINVAL;
 }

 attr->bp_len = len;
 attr->bp_type = type;
-attr->disabled = disabled;

 return 0;
 }
diff --git a/arch/powerpc/boot/dts/mpc5125twr.dts b/arch/powerpc/boot/dts/mpc5125twr.dts
index 4177b62240c2..a618dfc13e4c 100644
--- a/arch/powerpc/boot/dts/mpc5125twr.dts
+++ b/arch/powerpc/boot/dts/mpc5125twr.dts
@@ -58,7 +58,6 @@
 compatible = "fsl,mpc5121-immr";
 #address-cells = <1>;
 #size-cells = <1>;
-#interrupt-cells = <2>;
 ranges = <0x0 0x80000000 0x400000>;
 reg = <0x80000000 0x400000>;
 bus-frequency = <66000000>; // 66 MHz ips bus
@@ -189,6 +188,10 @@
 reg = <0xA000 0x1000>;
 };

+// disable USB1 port
+// TODO:
+// correct pinmux config and fix USB3320 ulpi dependency
+// before re-enabling it
 usb@3000 {
 compatible = "fsl,mpc5121-usb2-dr";
 reg = <0x3000 0x400>;
@@ -197,6 +200,7 @@
 interrupts = <43 0x8>;
 dr_mode = "host";
 phy_type = "ulpi";
+status = "disabled";
 };

 // 5125 PSCs are not 52xx or 5121 PSC compatible
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 894662a5d4d5..243ce69ad685 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -284,7 +284,7 @@ do_kvm_##n: \
 subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \
 beq- 1f; \
 ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
-1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \
+1: cmpdi cr1,r1,-INT_FRAME_SIZE; /* check if r1 is in userspace */ \
 blt+ cr1,3f; /* abort if it is */ \
 li r1,(n); /* will be reloaded later */ \
 sth r1,PACA_TRAP_SAVE(r13); \
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 4a594b76674d..bc23b1ba7980 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -192,6 +192,10 @@ extern void kvmppc_load_up_vsx(void);
 extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
 extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
 extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
+extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
+struct kvm_vcpu *vcpu);
+extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
+struct kvmppc_book3s_shadow_vcpu *svcpu);

 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
 {
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 0bd9348a4db9..192917d2239c 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -79,6 +79,7 @@ struct kvmppc_host_state {
 ulong vmhandler;
 ulong scratch0;
 ulong scratch1;
+ulong scratch2;
 u8 in_guest;
 u8 restore_hid5;
 u8 napping;
@@ -106,6 +107,7 @@ struct kvmppc_host_state {
 };

 struct kvmppc_book3s_shadow_vcpu {
+bool in_use;
 ulong gpr[14];
 u32 cr;
 u32 xer;
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 9ee12610af02..aace90547614 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -35,7 +35,7 @@ extern void giveup_vsx(struct task_struct *);
 extern void enable_kernel_spe(void);
 extern void giveup_spe(struct task_struct *);
 extern void load_up_spe(struct task_struct *);
-extern void switch_booke_debug_regs(struct thread_struct *new_thread);
+extern void switch_booke_debug_regs(struct debug_reg *new_debug);

 #ifndef CONFIG_SMP
 extern void discard_lazy_cpu_state(void);
diff --git a/arch/powerpc/include/asm/unaligned.h b/arch/powerpc/include/asm/unaligned.h
index 5f1b1e3c2137..8296381ae432 100644
--- a/arch/powerpc/include/asm/unaligned.h
+++ b/arch/powerpc/include/asm/unaligned.h
@@ -4,13 +4,18 @@
 #ifdef __KERNEL__

 /*
-* The PowerPC can do unaligned accesses itself in big endian mode.
+* The PowerPC can do unaligned accesses itself based on its endian mode.
 */
 #include <linux/unaligned/access_ok.h>
 #include <linux/unaligned/generic.h>

+#ifdef __LITTLE_ENDIAN__
+#define get_unaligned __get_unaligned_le
+#define put_unaligned __put_unaligned_le
+#else
 #define get_unaligned __get_unaligned_be
 #define put_unaligned __put_unaligned_be
+#endif

 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_UNALIGNED_H */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 2ea5cc033ec8..d3de01066f7d 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -576,6 +576,7 @@ int main(void)
 HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
 HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
 HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
+HSTATE_FIELD(HSTATE_SCRATCH2, scratch2);
 HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
 HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5);
 HSTATE_FIELD(HSTATE_NAPPING, napping);
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 2ae41aba4053..4f0946de2d5c 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -80,6 +80,7 @@ END_FTR_SECTION(0, 1)
 * of the function that the cpu should jump to to continue
 * initialization.
 */
+.balign 8
 .globl __secondary_hold_spinloop
 __secondary_hold_spinloop:
 .llong 0x0
@@ -470,6 +471,7 @@ _STATIC(__after_prom_start)
 mtctr r8
 bctr

+.balign 8
 p_end: .llong _end - _stext

 4: /* Now copy the rest of the kernel up to _end */
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 3386d8ab7eb0..4a96556fd2d4 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -339,7 +339,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
 #endif
 }

-static void prime_debug_regs(struct thread_struct *thread)
+static void prime_debug_regs(struct debug_reg *debug)
 {
 /*
 * We could have inherited MSR_DE from userspace, since
@@ -348,22 +348,22 @@ static void prime_debug_regs(struct thread_struct *thread)
 */
 mtmsr(mfmsr() & ~MSR_DE);

-mtspr(SPRN_IAC1, thread->debug.iac1);
-mtspr(SPRN_IAC2, thread->debug.iac2);
+mtspr(SPRN_IAC1, debug->iac1);
+mtspr(SPRN_IAC2, debug->iac2);
 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
-mtspr(SPRN_IAC3, thread->debug.iac3);
-mtspr(SPRN_IAC4, thread->debug.iac4);
+mtspr(SPRN_IAC3, debug->iac3);
+mtspr(SPRN_IAC4, debug->iac4);
 #endif
-mtspr(SPRN_DAC1, thread->debug.dac1);
-mtspr(SPRN_DAC2, thread->debug.dac2);
+mtspr(SPRN_DAC1, debug->dac1);
+mtspr(SPRN_DAC2, debug->dac2);
 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
-mtspr(SPRN_DVC1, thread->debug.dvc1);
-mtspr(SPRN_DVC2, thread->debug.dvc2);
+mtspr(SPRN_DVC1, debug->dvc1);
+mtspr(SPRN_DVC2, debug->dvc2);
 #endif
-mtspr(SPRN_DBCR0, thread->debug.dbcr0);
-mtspr(SPRN_DBCR1, thread->debug.dbcr1);
+mtspr(SPRN_DBCR0, debug->dbcr0);
+mtspr(SPRN_DBCR1, debug->dbcr1);
 #ifdef CONFIG_BOOKE
-mtspr(SPRN_DBCR2, thread->debug.dbcr2);
+mtspr(SPRN_DBCR2, debug->dbcr2);
 #endif
 }
 /*
@@ -371,11 +371,11 @@ static void prime_debug_regs(struct thread_struct *thread)
 * debug registers, set the debug registers from the values
 * stored in the new thread.
 */
-void switch_booke_debug_regs(struct thread_struct *new_thread)
+void switch_booke_debug_regs(struct debug_reg *new_debug)
 {
 if ((current->thread.debug.dbcr0 & DBCR0_IDM)
-|| (new_thread->debug.dbcr0 & DBCR0_IDM))
-prime_debug_regs(new_thread);
+|| (new_debug->dbcr0 & DBCR0_IDM))
+prime_debug_regs(new_debug);
 }
 EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
@@ -683,7 +683,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 #endif /* CONFIG_SMP */

 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
-switch_booke_debug_regs(&new->thread);
+switch_booke_debug_regs(&new->thread.debug);
 #else
 /*
 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index f3ff587a8b7d..c5d148434c08 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -469,11 +469,14 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 slb_v = vcpu->kvm->arch.vrma_slb_v;
 }

+preempt_disable();
 /* Find the HPTE in the hash table */
 index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
 HPTE_V_VALID | HPTE_V_ABSENT);
-if (index < 0)
+if (index < 0) {
+preempt_enable();
 return -ENOENT;
+}
 hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
 v = hptep[0] & ~HPTE_V_HVLOCK;
 gr = kvm->arch.revmap[index].guest_rpte;
@@ -481,6 +484,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 /* Unlock the HPTE */
 asm volatile("lwsync" : : : "memory");
 hptep[0] = v;
+preempt_enable();

 gpte->eaddr = eaddr;
 gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);
@@ -665,6 +669,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 return -EFAULT;
 } else {
 page = pages[0];
+pfn = page_to_pfn(page);
 if (PageHuge(page)) {
 page = compound_head(page);
 pte_size <<= compound_order(page);
@@ -689,7 +694,6 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 }
 rcu_read_unlock_sched();
 }
-pfn = page_to_pfn(page);
 }

 ret = -EFAULT;
@@ -707,8 +711,14 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
 }

-/* Set the HPTE to point to pfn */
-r = (r & ~(HPTE_R_PP0 - pte_size)) | (pfn << PAGE_SHIFT);
+/*
+* Set the HPTE to point to pfn.
+* Since the pfn is at PAGE_SIZE granularity, make sure we
+* don't mask out lower-order bits if psize < PAGE_SIZE.
+*/
+if (psize < PAGE_SIZE)
+psize = PAGE_SIZE;
+r = (r & ~(HPTE_R_PP0 - psize)) | ((pfn << PAGE_SHIFT) & ~(psize - 1));
 if (hpte_is_writable(r) && !write_ok)
 r = hpte_make_readonly(r);
 ret = RESUME_GUEST;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 072287f1c3bc..b51d5db78068 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -131,8 +131,9 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
 static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
 {
 struct kvmppc_vcore *vc = vcpu->arch.vcore;
+unsigned long flags;

-spin_lock(&vcpu->arch.tbacct_lock);
+spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
 if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE &&
 vc->preempt_tb != TB_NIL) {
 vc->stolen_tb += mftb() - vc->preempt_tb;
@@ -143,19 +144,20 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
 vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
 vcpu->arch.busy_preempt = TB_NIL;
 }
-spin_unlock(&vcpu->arch.tbacct_lock);
+spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
 }

 static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
 {
 struct kvmppc_vcore *vc = vcpu->arch.vcore;
+unsigned long flags;

-spin_lock(&vcpu->arch.tbacct_lock);
+spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
 if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
 vc->preempt_tb = mftb();
 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
 vcpu->arch.busy_preempt = mftb();
-spin_unlock(&vcpu->arch.tbacct_lock);
+spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
 }

 static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
@@ -486,11 +488,11 @@ static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
 */
 if (vc->vcore_state != VCORE_INACTIVE &&
 vc->runner->arch.run_task != current) {
-spin_lock(&vc->runner->arch.tbacct_lock);
+spin_lock_irq(&vc->runner->arch.tbacct_lock);
 p = vc->stolen_tb;
 if (vc->preempt_tb != TB_NIL)
 p += now - vc->preempt_tb;
-spin_unlock(&vc->runner->arch.tbacct_lock);
+spin_unlock_irq(&vc->runner->arch.tbacct_lock);
 } else {
 p = vc->stolen_tb;
 }
@@ -512,10 +514,10 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
 core_stolen = vcore_stolen_time(vc, now);
 stolen = core_stolen - vcpu->arch.stolen_logged;
 vcpu->arch.stolen_logged = core_stolen;
-spin_lock(&vcpu->arch.tbacct_lock);
+spin_lock_irq(&vcpu->arch.tbacct_lock);
 stolen += vcpu->arch.busy_stolen;
 vcpu->arch.busy_stolen = 0;
-spin_unlock(&vcpu->arch.tbacct_lock);
+spin_unlock_irq(&vcpu->arch.tbacct_lock);
 if (!dt || !vpa)
 return;
 memset(dt, 0, sizeof(struct dtl_entry));
@@ -589,7 +591,9 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 if (list_empty(&vcpu->kvm->arch.rtas_tokens))
 return RESUME_HOST;

+idx = srcu_read_lock(&vcpu->kvm->srcu);
 rc = kvmppc_rtas_hcall(vcpu);
+srcu_read_unlock(&vcpu->kvm->srcu, idx);

 if (rc == -ENOENT)
 return RESUME_HOST;
@@ -1115,13 +1119,13 @@ static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,

 if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
 return;
-spin_lock(&vcpu->arch.tbacct_lock);
+spin_lock_irq(&vcpu->arch.tbacct_lock);
 now = mftb();
 vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
 vcpu->arch.stolen_logged;
 vcpu->arch.busy_preempt = now;
 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
-spin_unlock(&vcpu->arch.tbacct_lock);
+spin_unlock_irq(&vcpu->arch.tbacct_lock);
 --vc->n_runnable;
 list_del(&vcpu->arch.run_list);
 }
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 9c515440ad1a..8689e2e30857 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -225,6 +225,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 is_io = pa & (HPTE_R_I | HPTE_R_W);
 pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
 pa &= PAGE_MASK;
+pa |= gpa & ~PAGE_MASK;
 } else {
 /* Translate to host virtual address */
 hva = __gfn_to_hva_memslot(memslot, gfn);
@@ -238,13 +239,13 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 ptel = hpte_make_readonly(ptel);
 is_io = hpte_cache_bits(pte_val(pte));
 pa = pte_pfn(pte) << PAGE_SHIFT;
+pa |= hva & (pte_size - 1);
+pa |= gpa & ~PAGE_MASK;
 }
 }

 if (pte_size < psize)
 return H_PARAMETER;
-if (pa && pte_size > psize)
-pa |= gpa & (pte_size - 1);

 ptel &= ~(HPTE_R_PP0 - psize);
 ptel |= pa;
@@ -749,6 +750,10 @@ static int slb_base_page_shift[4] = {
 20, /* 1M, unsupported */
 };

+/* When called from virtmode, this func should be protected by
+* preempt_disable(), otherwise, the holding of HPTE_V_HVLOCK
+* can trigger deadlock issue.
+*/
 long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
 unsigned long valid)
 {
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index bc8de75b1925..be4fa04a37c9 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -153,7 +153,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

 13: b machine_check_fwnmi

-
 /*
 * We come in here when wakened from nap mode on a secondary hw thread.
 * Relocation is off and most register values are lost.
@@ -224,6 +223,11 @@ kvm_start_guest:
 /* Clear our vcpu pointer so we don't come back in early */
 li r0, 0
 std r0, HSTATE_KVM_VCPU(r13)
+/*
+* Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing
+* the nap_count, because once the increment to nap_count is
+* visible we could be given another vcpu.
+*/
 lwsync
 /* Clear any pending IPI - we're an offline thread */
 ld r5, HSTATE_XICS_PHYS(r13)
@@ -241,7 +245,6 @@ kvm_start_guest:
 /* increment the nap count and then go to nap mode */
 ld r4, HSTATE_KVM_VCORE(r13)
 addi r4, r4, VCORE_NAP_COUNT
-lwsync /* make previous updates visible */
 51: lwarx r3, 0, r4
 addi r3, r3, 1
 stwcx. r3, 0, r4
@@ -751,15 +754,14 @@ kvmppc_interrupt_hv:
 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
 * guest R13 saved in SPRN_SCRATCH0
 */
-/* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
-std r9, HSTATE_HOST_R2(r13)
+std r9, HSTATE_SCRATCH2(r13)

 lbz r9, HSTATE_IN_GUEST(r13)
 cmpwi r9, KVM_GUEST_MODE_HOST_HV
 beq kvmppc_bad_host_intr
 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 cmpwi r9, KVM_GUEST_MODE_GUEST
-ld r9, HSTATE_HOST_R2(r13)
+ld r9, HSTATE_SCRATCH2(r13)
 beq kvmppc_interrupt_pr
 #endif
 /* We're now back in the host but in guest MMU context */
@@ -779,7 +781,7 @@ kvmppc_interrupt_hv:
 std r6, VCPU_GPR(R6)(r9)
 std r7, VCPU_GPR(R7)(r9)
 std r8, VCPU_GPR(R8)(r9)
-ld r0, HSTATE_HOST_R2(r13)
+ld r0, HSTATE_SCRATCH2(r13)
 std r0, VCPU_GPR(R9)(r9)
 std r10, VCPU_GPR(R10)(r9)
 std r11, VCPU_GPR(R11)(r9)
@@ -990,14 +992,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 */
 /* Increment the threads-exiting-guest count in the 0xff00
 bits of vcore->entry_exit_count */
-lwsync
 ld r5,HSTATE_KVM_VCORE(r13)
 addi r6,r5,VCORE_ENTRY_EXIT
 41: lwarx r3,0,r6
 addi r0,r3,0x100
 stwcx. r0,0,r6
 bne 41b
-lwsync
+isync /* order stwcx. vs. reading napping_threads */

 /*
 * At this point we have an interrupt that we have to pass
@@ -1030,6 +1031,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 sld r0,r0,r4
 andc. r3,r3,r0 /* no sense IPI'ing ourselves */
 beq 43f
+/* Order entry/exit update vs. IPIs */
+sync
 mulli r4,r4,PACA_SIZE /* get paca for thread 0 */
 subf r6,r4,r13
 42: andi. r0,r3,1
@@ -1638,10 +1641,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
 bge kvm_cede_exit
 stwcx. r4,0,r6
 bne 31b
+/* order napping_threads update vs testing entry_exit_count */
+isync
 li r0,1
 stb r0,HSTATE_NAPPING(r13)
-/* order napping_threads update vs testing entry_exit_count */
-lwsync
 mr r4,r3
 lwz r7,VCORE_ENTRY_EXIT(r5)
 cmpwi r7,0x100
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S index f4dd041c14ea..f779450cb07c 100644 --- a/arch/powerpc/kvm/book3s_interrupts.S +++ b/arch/powerpc/kvm/book3s_interrupts.S | |||
@@ -129,29 +129,32 @@ kvm_start_lightweight: | |||
129 | * R12 = exit handler id | 129 | * R12 = exit handler id |
130 | * R13 = PACA | 130 | * R13 = PACA |
131 | * SVCPU.* = guest * | 131 | * SVCPU.* = guest * |
132 | * MSR.EE = 1 | ||
132 | * | 133 | * |
133 | */ | 134 | */ |
134 | 135 | ||
136 | PPC_LL r3, GPR4(r1) /* vcpu pointer */ | ||
137 | |||
138 | /* | ||
139 | * kvmppc_copy_from_svcpu can clobber volatile registers, save | ||
140 | * the exit handler id to the vcpu and restore it from there later. | ||
141 | */ | ||
142 | stw r12, VCPU_TRAP(r3) | ||
143 | |||
135 | /* Transfer reg values from shadow vcpu back to vcpu struct */ | 144 | /* Transfer reg values from shadow vcpu back to vcpu struct */ |
136 | /* On 64-bit, interrupts are still off at this point */ | 145 | /* On 64-bit, interrupts are still off at this point */ |
137 | PPC_LL r3, GPR4(r1) /* vcpu pointer */ | 146 | |
138 | GET_SHADOW_VCPU(r4) | 147 | GET_SHADOW_VCPU(r4) |
139 | bl FUNC(kvmppc_copy_from_svcpu) | 148 | bl FUNC(kvmppc_copy_from_svcpu) |
140 | nop | 149 | nop |
141 | 150 | ||
142 | #ifdef CONFIG_PPC_BOOK3S_64 | 151 | #ifdef CONFIG_PPC_BOOK3S_64 |
143 | /* Re-enable interrupts */ | ||
144 | ld r3, HSTATE_HOST_MSR(r13) | ||
145 | ori r3, r3, MSR_EE | ||
146 | MTMSR_EERI(r3) | ||
147 | |||
148 | /* | 152 | /* |
149 | * Reload kernel SPRG3 value. | 153 | * Reload kernel SPRG3 value. |
150 | * No need to save guest value as usermode can't modify SPRG3. | 154 | * No need to save guest value as usermode can't modify SPRG3. |
151 | */ | 155 | */ |
152 | ld r3, PACA_SPRG3(r13) | 156 | ld r3, PACA_SPRG3(r13) |
153 | mtspr SPRN_SPRG3, r3 | 157 | mtspr SPRN_SPRG3, r3 |
154 | |||
155 | #endif /* CONFIG_PPC_BOOK3S_64 */ | 158 | #endif /* CONFIG_PPC_BOOK3S_64 */ |
156 | 159 | ||
157 | /* R7 = vcpu */ | 160 | /* R7 = vcpu */ |
@@ -177,7 +180,7 @@ kvm_start_lightweight: | |||
177 | PPC_STL r31, VCPU_GPR(R31)(r7) | 180 | PPC_STL r31, VCPU_GPR(R31)(r7) |
178 | 181 | ||
179 | /* Pass the exit number as 3rd argument to kvmppc_handle_exit */ | 182 | /* Pass the exit number as 3rd argument to kvmppc_handle_exit */ |
180 | mr r5, r12 | 183 | lwz r5, VCPU_TRAP(r7) |
181 | 184 | ||
182 | /* Restore r3 (kvm_run) and r4 (vcpu) */ | 185 | /* Restore r3 (kvm_run) and r4 (vcpu) */ |
183 | REST_2GPRS(3, r1) | 186 | REST_2GPRS(3, r1) |
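The book3s_interrupts.S hunks above address the fact that calling into C (kvmppc_copy_from_svcpu) may clobber volatile registers: the exit handler id that used to stay in r12 across the call is now parked in the vcpu's VCPU_TRAP slot and reloaded from there for kvmppc_handle_exit. The pattern, restated as a C sketch (struct and function names other than VCPU_TRAP/kvmppc_copy_from_svcpu are placeholders):

    /* Preserve a value across a call that does not preserve caller state. */
    struct vcpu_stub {
            unsigned int trap;              /* the VCPU_TRAP slot */
    };

    static void copy_from_svcpu_stub(struct vcpu_stub *vcpu) { (void)vcpu; }
    static void handle_exit_stub(struct vcpu_stub *vcpu, unsigned int trap)
    {
            (void)vcpu; (void)trap;
    }

    static void exit_path(struct vcpu_stub *vcpu, unsigned int exit_id)
    {
            vcpu->trap = exit_id;                   /* stw r12, VCPU_TRAP(r3) */
            copy_from_svcpu_stub(vcpu);             /* bl kvmppc_copy_from_svcpu */
            handle_exit_stub(vcpu, vcpu->trap);     /* lwz r5, VCPU_TRAP(r7) */
    }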
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index fe14ca3dd171..5b9e9063cfaf 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c | |||
@@ -66,6 +66,7 @@ static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu) | |||
66 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); | 66 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); |
67 | memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb)); | 67 | memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb)); |
68 | svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max; | 68 | svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max; |
69 | svcpu->in_use = 0; | ||
69 | svcpu_put(svcpu); | 70 | svcpu_put(svcpu); |
70 | #endif | 71 | #endif |
71 | vcpu->cpu = smp_processor_id(); | 72 | vcpu->cpu = smp_processor_id(); |
@@ -78,6 +79,9 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu) | |||
78 | { | 79 | { |
79 | #ifdef CONFIG_PPC_BOOK3S_64 | 80 | #ifdef CONFIG_PPC_BOOK3S_64 |
80 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); | 81 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); |
82 | if (svcpu->in_use) { | ||
83 | kvmppc_copy_from_svcpu(vcpu, svcpu); | ||
84 | } | ||
81 | memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb)); | 85 | memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb)); |
82 | to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max; | 86 | to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max; |
83 | svcpu_put(svcpu); | 87 | svcpu_put(svcpu); |
@@ -110,12 +114,26 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu, | |||
110 | svcpu->ctr = vcpu->arch.ctr; | 114 | svcpu->ctr = vcpu->arch.ctr; |
111 | svcpu->lr = vcpu->arch.lr; | 115 | svcpu->lr = vcpu->arch.lr; |
112 | svcpu->pc = vcpu->arch.pc; | 116 | svcpu->pc = vcpu->arch.pc; |
117 | svcpu->in_use = true; | ||
113 | } | 118 | } |
114 | 119 | ||
115 | /* Copy data touched by real-mode code from shadow vcpu back to vcpu */ | 120 | /* Copy data touched by real-mode code from shadow vcpu back to vcpu */ |
116 | void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, | 121 | void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, |
117 | struct kvmppc_book3s_shadow_vcpu *svcpu) | 122 | struct kvmppc_book3s_shadow_vcpu *svcpu) |
118 | { | 123 | { |
124 | /* | ||
125 | * vcpu_put would just call us again because in_use hasn't | ||
126 | * been updated yet. | ||
127 | */ | ||
128 | preempt_disable(); | ||
129 | |||
130 | /* | ||
131 | * Maybe we were already preempted and synced the svcpu from | ||
132 | * our preempt notifiers. Don't bother touching this svcpu then. | ||
133 | */ | ||
134 | if (!svcpu->in_use) | ||
135 | goto out; | ||
136 | |||
119 | vcpu->arch.gpr[0] = svcpu->gpr[0]; | 137 | vcpu->arch.gpr[0] = svcpu->gpr[0]; |
120 | vcpu->arch.gpr[1] = svcpu->gpr[1]; | 138 | vcpu->arch.gpr[1] = svcpu->gpr[1]; |
121 | vcpu->arch.gpr[2] = svcpu->gpr[2]; | 139 | vcpu->arch.gpr[2] = svcpu->gpr[2]; |
@@ -139,6 +157,10 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, | |||
139 | vcpu->arch.fault_dar = svcpu->fault_dar; | 157 | vcpu->arch.fault_dar = svcpu->fault_dar; |
140 | vcpu->arch.fault_dsisr = svcpu->fault_dsisr; | 158 | vcpu->arch.fault_dsisr = svcpu->fault_dsisr; |
141 | vcpu->arch.last_inst = svcpu->last_inst; | 159 | vcpu->arch.last_inst = svcpu->last_inst; |
160 | svcpu->in_use = false; | ||
161 | |||
162 | out: | ||
163 | preempt_enable(); | ||
142 | } | 164 | } |
143 | 165 | ||
144 | static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu) | 166 | static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu) |
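The in_use flag introduced in book3s_pr.c above keeps the shadow-vcpu state from being copied back twice (once from the preempt notifier's vcpu_put path, once from the normal exit path), and the preempt_disable() window makes sure a preemption cannot re-trigger the sync in the middle of the copy. Condensed into a C sketch (structures and the preempt stubs are simplified stand-ins):

    #include <stdbool.h>

    struct shadow_stub {
            bool in_use;
            /* ... register state mirrored into the real-mode area ... */
    };

    static void preempt_disable_stub(void) { }
    static void preempt_enable_stub(void) { }

    static void copy_from_shadow_stub(struct shadow_stub *svcpu)
    {
            /* Keep vcpu_put from calling back into us while we copy. */
            preempt_disable_stub();

            /* Already synced by an earlier preemption?  Nothing to do. */
            if (!svcpu->in_use)
                    goto out;

            /* ... copy GPRs, CR, LR, fault state back to the vcpu ... */
            svcpu->in_use = false;
    out:
            preempt_enable_stub();
    }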
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S index a38c4c9edab8..c3c5231adade 100644 --- a/arch/powerpc/kvm/book3s_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_rmhandlers.S | |||
@@ -153,15 +153,11 @@ _GLOBAL(kvmppc_entry_trampoline) | |||
153 | 153 | ||
154 | li r6, MSR_IR | MSR_DR | 154 | li r6, MSR_IR | MSR_DR |
155 | andc r6, r5, r6 /* Clear DR and IR in MSR value */ | 155 | andc r6, r5, r6 /* Clear DR and IR in MSR value */ |
156 | #ifdef CONFIG_PPC_BOOK3S_32 | ||
157 | /* | 156 | /* |
158 | * Set EE in HOST_MSR so that it's enabled when we get into our | 157 | * Set EE in HOST_MSR so that it's enabled when we get into our |
159 | * C exit handler function. On 64-bit we delay enabling | 158 | * C exit handler function. |
160 | * interrupts until we have finished transferring stuff | ||
161 | * to or from the PACA. | ||
162 | */ | 159 | */ |
163 | ori r5, r5, MSR_EE | 160 | ori r5, r5, MSR_EE |
164 | #endif | ||
165 | mtsrr0 r7 | 161 | mtsrr0 r7 |
166 | mtsrr1 r6 | 162 | mtsrr1 r6 |
167 | RFI | 163 | RFI |
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 53e65a210b9a..0591e05db74b 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c | |||
@@ -681,7 +681,7 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu) | |||
681 | int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | 681 | int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) |
682 | { | 682 | { |
683 | int ret, s; | 683 | int ret, s; |
684 | struct thread_struct thread; | 684 | struct debug_reg debug; |
685 | #ifdef CONFIG_PPC_FPU | 685 | #ifdef CONFIG_PPC_FPU |
686 | struct thread_fp_state fp; | 686 | struct thread_fp_state fp; |
687 | int fpexc_mode; | 687 | int fpexc_mode; |
@@ -723,9 +723,9 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
723 | #endif | 723 | #endif |
724 | 724 | ||
725 | /* Switch to guest debug context */ | 725 | /* Switch to guest debug context */ |
726 | thread.debug = vcpu->arch.shadow_dbg_reg; | 726 | debug = vcpu->arch.shadow_dbg_reg; |
727 | switch_booke_debug_regs(&thread); | 727 | switch_booke_debug_regs(&debug); |
728 | thread.debug = current->thread.debug; | 728 | debug = current->thread.debug; |
729 | current->thread.debug = vcpu->arch.shadow_dbg_reg; | 729 | current->thread.debug = vcpu->arch.shadow_dbg_reg; |
730 | 730 | ||
731 | kvmppc_fix_ee_before_entry(); | 731 | kvmppc_fix_ee_before_entry(); |
@@ -736,8 +736,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
736 | We also get here with interrupts enabled. */ | 736 | We also get here with interrupts enabled. */ |
737 | 737 | ||
738 | /* Switch back to user space debug context */ | 738 | /* Switch back to user space debug context */ |
739 | switch_booke_debug_regs(&thread); | 739 | switch_booke_debug_regs(&debug); |
740 | current->thread.debug = thread.debug; | 740 | current->thread.debug = debug; |
741 | 741 | ||
742 | #ifdef CONFIG_PPC_FPU | 742 | #ifdef CONFIG_PPC_FPU |
743 | kvmppc_save_guest_fp(vcpu); | 743 | kvmppc_save_guest_fp(vcpu); |
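The booke.c change above swaps a whole struct thread_struct on the kernel stack for just the struct debug_reg that switch_booke_debug_regs() needs, shrinking the frame considerably. The guest/host debug-context swap it performs looks roughly like this sketch (all *_stub types and names are placeholders for the real kernel structures):

    struct debug_reg_stub  { unsigned long dbcr0; /* ... */ };
    struct vcpu_dbg_stub   { struct debug_reg_stub shadow_dbg_reg; };
    struct thread_dbg_stub { struct debug_reg_stub debug; };

    static void switch_debug_regs_stub(struct debug_reg_stub *new) { (void)new; }

    static void run_guest_stub(struct vcpu_dbg_stub *vcpu,
                               struct thread_dbg_stub *current_thread)
    {
            struct debug_reg_stub debug;

            /* Enter: install the guest's shadow debug registers. */
            debug = vcpu->shadow_dbg_reg;
            switch_debug_regs_stub(&debug);
            debug = current_thread->debug;          /* remember the host values */
            current_thread->debug = vcpu->shadow_dbg_reg;

            /* ... run the guest ... */

            /* Exit: restore the host debug context. */
            switch_debug_regs_stub(&debug);
            current_thread->debug = debug;
    }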
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S index d73a59014900..596a285c0755 100644 --- a/arch/powerpc/lib/copyuser_64.S +++ b/arch/powerpc/lib/copyuser_64.S | |||
@@ -9,6 +9,14 @@ | |||
9 | #include <asm/processor.h> | 9 | #include <asm/processor.h> |
10 | #include <asm/ppc_asm.h> | 10 | #include <asm/ppc_asm.h> |
11 | 11 | ||
12 | #ifdef __BIG_ENDIAN__ | ||
13 | #define sLd sld /* Shift towards low-numbered address. */ | ||
14 | #define sHd srd /* Shift towards high-numbered address. */ | ||
15 | #else | ||
16 | #define sLd srd /* Shift towards low-numbered address. */ | ||
17 | #define sHd sld /* Shift towards high-numbered address. */ | ||
18 | #endif | ||
19 | |||
12 | .align 7 | 20 | .align 7 |
13 | _GLOBAL(__copy_tofrom_user) | 21 | _GLOBAL(__copy_tofrom_user) |
14 | BEGIN_FTR_SECTION | 22 | BEGIN_FTR_SECTION |
@@ -118,10 +126,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) | |||
118 | 126 | ||
119 | 24: ld r9,0(r4) /* 3+2n loads, 2+2n stores */ | 127 | 24: ld r9,0(r4) /* 3+2n loads, 2+2n stores */ |
120 | 25: ld r0,8(r4) | 128 | 25: ld r0,8(r4) |
121 | sld r6,r9,r10 | 129 | sLd r6,r9,r10 |
122 | 26: ldu r9,16(r4) | 130 | 26: ldu r9,16(r4) |
123 | srd r7,r0,r11 | 131 | sHd r7,r0,r11 |
124 | sld r8,r0,r10 | 132 | sLd r8,r0,r10 |
125 | or r7,r7,r6 | 133 | or r7,r7,r6 |
126 | blt cr6,79f | 134 | blt cr6,79f |
127 | 27: ld r0,8(r4) | 135 | 27: ld r0,8(r4) |
@@ -129,35 +137,35 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) | |||
129 | 137 | ||
130 | 28: ld r0,0(r4) /* 4+2n loads, 3+2n stores */ | 138 | 28: ld r0,0(r4) /* 4+2n loads, 3+2n stores */ |
131 | 29: ldu r9,8(r4) | 139 | 29: ldu r9,8(r4) |
132 | sld r8,r0,r10 | 140 | sLd r8,r0,r10 |
133 | addi r3,r3,-8 | 141 | addi r3,r3,-8 |
134 | blt cr6,5f | 142 | blt cr6,5f |
135 | 30: ld r0,8(r4) | 143 | 30: ld r0,8(r4) |
136 | srd r12,r9,r11 | 144 | sHd r12,r9,r11 |
137 | sld r6,r9,r10 | 145 | sLd r6,r9,r10 |
138 | 31: ldu r9,16(r4) | 146 | 31: ldu r9,16(r4) |
139 | or r12,r8,r12 | 147 | or r12,r8,r12 |
140 | srd r7,r0,r11 | 148 | sHd r7,r0,r11 |
141 | sld r8,r0,r10 | 149 | sLd r8,r0,r10 |
142 | addi r3,r3,16 | 150 | addi r3,r3,16 |
143 | beq cr6,78f | 151 | beq cr6,78f |
144 | 152 | ||
145 | 1: or r7,r7,r6 | 153 | 1: or r7,r7,r6 |
146 | 32: ld r0,8(r4) | 154 | 32: ld r0,8(r4) |
147 | 76: std r12,8(r3) | 155 | 76: std r12,8(r3) |
148 | 2: srd r12,r9,r11 | 156 | 2: sHd r12,r9,r11 |
149 | sld r6,r9,r10 | 157 | sLd r6,r9,r10 |
150 | 33: ldu r9,16(r4) | 158 | 33: ldu r9,16(r4) |
151 | or r12,r8,r12 | 159 | or r12,r8,r12 |
152 | 77: stdu r7,16(r3) | 160 | 77: stdu r7,16(r3) |
153 | srd r7,r0,r11 | 161 | sHd r7,r0,r11 |
154 | sld r8,r0,r10 | 162 | sLd r8,r0,r10 |
155 | bdnz 1b | 163 | bdnz 1b |
156 | 164 | ||
157 | 78: std r12,8(r3) | 165 | 78: std r12,8(r3) |
158 | or r7,r7,r6 | 166 | or r7,r7,r6 |
159 | 79: std r7,16(r3) | 167 | 79: std r7,16(r3) |
160 | 5: srd r12,r9,r11 | 168 | 5: sHd r12,r9,r11 |
161 | or r12,r8,r12 | 169 | or r12,r8,r12 |
162 | 80: std r12,24(r3) | 170 | 80: std r12,24(r3) |
163 | bne 6f | 171 | bne 6f |
@@ -165,23 +173,38 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) | |||
165 | blr | 173 | blr |
166 | 6: cmpwi cr1,r5,8 | 174 | 6: cmpwi cr1,r5,8 |
167 | addi r3,r3,32 | 175 | addi r3,r3,32 |
168 | sld r9,r9,r10 | 176 | sLd r9,r9,r10 |
169 | ble cr1,7f | 177 | ble cr1,7f |
170 | 34: ld r0,8(r4) | 178 | 34: ld r0,8(r4) |
171 | srd r7,r0,r11 | 179 | sHd r7,r0,r11 |
172 | or r9,r7,r9 | 180 | or r9,r7,r9 |
173 | 7: | 181 | 7: |
174 | bf cr7*4+1,1f | 182 | bf cr7*4+1,1f |
183 | #ifdef __BIG_ENDIAN__ | ||
175 | rotldi r9,r9,32 | 184 | rotldi r9,r9,32 |
185 | #endif | ||
176 | 94: stw r9,0(r3) | 186 | 94: stw r9,0(r3) |
187 | #ifdef __LITTLE_ENDIAN__ | ||
188 | rotrdi r9,r9,32 | ||
189 | #endif | ||
177 | addi r3,r3,4 | 190 | addi r3,r3,4 |
178 | 1: bf cr7*4+2,2f | 191 | 1: bf cr7*4+2,2f |
192 | #ifdef __BIG_ENDIAN__ | ||
179 | rotldi r9,r9,16 | 193 | rotldi r9,r9,16 |
194 | #endif | ||
180 | 95: sth r9,0(r3) | 195 | 95: sth r9,0(r3) |
196 | #ifdef __LITTLE_ENDIAN__ | ||
197 | rotrdi r9,r9,16 | ||
198 | #endif | ||
181 | addi r3,r3,2 | 199 | addi r3,r3,2 |
182 | 2: bf cr7*4+3,3f | 200 | 2: bf cr7*4+3,3f |
201 | #ifdef __BIG_ENDIAN__ | ||
183 | rotldi r9,r9,8 | 202 | rotldi r9,r9,8 |
203 | #endif | ||
184 | 96: stb r9,0(r3) | 204 | 96: stb r9,0(r3) |
205 | #ifdef __LITTLE_ENDIAN__ | ||
206 | rotrdi r9,r9,8 | ||
207 | #endif | ||
185 | 3: li r3,0 | 208 | 3: li r3,0 |
186 | blr | 209 | blr |
187 | 210 | ||
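The sLd/sHd macros added to copyuser_64.S capture that, when stitching two misaligned doublewords together, the byte at the lowest address sits in the most-significant end of a register on big-endian but the least-significant end on little-endian, so the "shift towards the low address" direction flips between sld and srd. A host-side C illustration of the same idea (purely illustrative, using the compiler's __BYTE_ORDER__ macro rather than anything from the kernel):

    #include <stdint.h>

    /* Combine two consecutively loaded 64-bit words into the 8 bytes that
     * start at byte offset 'off' (0..7) of the first word, the way the
     * unaligned copy loop does with paired shifts. */
    static uint64_t combine(uint64_t lo_word, uint64_t hi_word, unsigned int off)
    {
            unsigned int sh = off * 8;

            if (sh == 0)
                    return lo_word;
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
            /* Low addresses occupy the high-order bits: sLd == sld, sHd == srd. */
            return (lo_word << sh) | (hi_word >> (64 - sh));
    #else
            /* Low addresses occupy the low-order bits: sLd == srd, sHd == sld. */
            return (lo_word >> sh) | (hi_word << (64 - sh));
    #endif
    }

The rotldi/rotrdi pairs guarded by __BIG_ENDIAN__/__LITTLE_ENDIAN__ in the tail serve the same purpose for the sub-doubleword stores.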
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c index 02245cee7818..d7ddcee7feb8 100644 --- a/arch/powerpc/platforms/powernv/eeh-ioda.c +++ b/arch/powerpc/platforms/powernv/eeh-ioda.c | |||
@@ -36,7 +36,6 @@ | |||
36 | #include "powernv.h" | 36 | #include "powernv.h" |
37 | #include "pci.h" | 37 | #include "pci.h" |
38 | 38 | ||
39 | static char *hub_diag = NULL; | ||
40 | static int ioda_eeh_nb_init = 0; | 39 | static int ioda_eeh_nb_init = 0; |
41 | 40 | ||
42 | static int ioda_eeh_event(struct notifier_block *nb, | 41 | static int ioda_eeh_event(struct notifier_block *nb, |
@@ -140,15 +139,6 @@ static int ioda_eeh_post_init(struct pci_controller *hose) | |||
140 | ioda_eeh_nb_init = 1; | 139 | ioda_eeh_nb_init = 1; |
141 | } | 140 | } |
142 | 141 | ||
143 | /* We needn't HUB diag-data on PHB3 */ | ||
144 | if (phb->type == PNV_PHB_IODA1 && !hub_diag) { | ||
145 | hub_diag = (char *)__get_free_page(GFP_KERNEL | __GFP_ZERO); | ||
146 | if (!hub_diag) { | ||
147 | pr_err("%s: Out of memory !\n", __func__); | ||
148 | return -ENOMEM; | ||
149 | } | ||
150 | } | ||
151 | |||
152 | #ifdef CONFIG_DEBUG_FS | 142 | #ifdef CONFIG_DEBUG_FS |
153 | if (phb->dbgfs) { | 143 | if (phb->dbgfs) { |
154 | debugfs_create_file("err_injct_outbound", 0600, | 144 | debugfs_create_file("err_injct_outbound", 0600, |
@@ -633,11 +623,10 @@ static void ioda_eeh_hub_diag_common(struct OpalIoP7IOCErrorData *data) | |||
633 | static void ioda_eeh_hub_diag(struct pci_controller *hose) | 623 | static void ioda_eeh_hub_diag(struct pci_controller *hose) |
634 | { | 624 | { |
635 | struct pnv_phb *phb = hose->private_data; | 625 | struct pnv_phb *phb = hose->private_data; |
636 | struct OpalIoP7IOCErrorData *data; | 626 | struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag; |
637 | long rc; | 627 | long rc; |
638 | 628 | ||
639 | data = (struct OpalIoP7IOCErrorData *)ioda_eeh_hub_diag; | 629 | rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data)); |
640 | rc = opal_pci_get_hub_diag_data(phb->hub_id, data, PAGE_SIZE); | ||
641 | if (rc != OPAL_SUCCESS) { | 630 | if (rc != OPAL_SUCCESS) { |
642 | pr_warning("%s: Failed to get HUB#%llx diag-data (%ld)\n", | 631 | pr_warning("%s: Failed to get HUB#%llx diag-data (%ld)\n", |
643 | __func__, phb->hub_id, rc); | 632 | __func__, phb->hub_id, rc); |
@@ -820,14 +809,15 @@ static void ioda_eeh_phb_diag(struct pci_controller *hose) | |||
820 | struct OpalIoPhbErrorCommon *common; | 809 | struct OpalIoPhbErrorCommon *common; |
821 | long rc; | 810 | long rc; |
822 | 811 | ||
823 | common = (struct OpalIoPhbErrorCommon *)phb->diag.blob; | 812 | rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob, |
824 | rc = opal_pci_get_phb_diag_data2(phb->opal_id, common, PAGE_SIZE); | 813 | PNV_PCI_DIAG_BUF_SIZE); |
825 | if (rc != OPAL_SUCCESS) { | 814 | if (rc != OPAL_SUCCESS) { |
826 | pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n", | 815 | pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n", |
827 | __func__, hose->global_number, rc); | 816 | __func__, hose->global_number, rc); |
828 | return; | 817 | return; |
829 | } | 818 | } |
830 | 819 | ||
820 | common = (struct OpalIoPhbErrorCommon *)phb->diag.blob; | ||
831 | switch (common->ioType) { | 821 | switch (common->ioType) { |
832 | case OPAL_PHB_ERROR_DATA_TYPE_P7IOC: | 822 | case OPAL_PHB_ERROR_DATA_TYPE_P7IOC: |
833 | ioda_eeh_p7ioc_phb_diag(hose, common); | 823 | ioda_eeh_p7ioc_phb_diag(hose, common); |
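The eeh-ioda.c hunks drop the global, lazily allocated hub_diag page in favour of the per-PHB phb->diag union (extended in pci.h below to also hold the hub error data), which removes an allocation failure path and passes the real buffer size to OPAL instead of PAGE_SIZE. The shape of the change, as a sketch with placeholder names:

    /* The diagnostic buffer now lives inside each PHB structure. */
    struct hub_err_stub { int io_type; /* ... OPAL-provided fields ... */ };

    struct phb_stub {
            union {
                    unsigned char blob[4096];       /* PNV_PCI_DIAG_BUF_SIZE stand-in */
                    struct hub_err_stub hub_diag;
            } diag;
    };

    /* Hypothetical stand-in for opal_pci_get_hub_diag_data(). */
    static long get_hub_diag_stub(struct hub_err_stub *data, unsigned long len)
    {
            (void)data; (void)len;
            return 0;
    }

    static void hub_diag_stub(struct phb_stub *phb)
    {
            struct hub_err_stub *data = &phb->diag.hub_diag;

            /* Pass the actual structure size, not PAGE_SIZE. */
            if (get_hub_diag_stub(data, sizeof(*data)) != 0)
                    return;
            /* ... decode *data ... */
    }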
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 911c24ef033e..1ed8d5f40f5a 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h | |||
@@ -172,11 +172,13 @@ struct pnv_phb { | |||
172 | } ioda; | 172 | } ioda; |
173 | }; | 173 | }; |
174 | 174 | ||
175 | /* PHB status structure */ | 175 | /* PHB and hub status structure */ |
176 | union { | 176 | union { |
177 | unsigned char blob[PNV_PCI_DIAG_BUF_SIZE]; | 177 | unsigned char blob[PNV_PCI_DIAG_BUF_SIZE]; |
178 | struct OpalIoP7IOCPhbErrorData p7ioc; | 178 | struct OpalIoP7IOCPhbErrorData p7ioc; |
179 | struct OpalIoP7IOCErrorData hub_diag; | ||
179 | } diag; | 180 | } diag; |
181 | |||
180 | }; | 182 | }; |
181 | 183 | ||
182 | extern struct pci_ops pnv_pci_ops; | 184 | extern struct pci_ops pnv_pci_ops; |
diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile index 7b95f29e3174..3baff31e58cf 100644 --- a/arch/sh/lib/Makefile +++ b/arch/sh/lib/Makefile | |||
@@ -6,7 +6,7 @@ lib-y = delay.o memmove.o memchr.o \ | |||
6 | checksum.o strlen.o div64.o div64-generic.o | 6 | checksum.o strlen.o div64.o div64-generic.o |
7 | 7 | ||
8 | # Extracted from libgcc | 8 | # Extracted from libgcc |
9 | lib-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \ | 9 | obj-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \ |
10 | ashlsi3.o ashrsi3.o ashiftrt.o lshrsi3.o \ | 10 | ashlsi3.o ashrsi3.o ashiftrt.o lshrsi3.o \ |
11 | udiv_qrnnd.o | 11 | udiv_qrnnd.o |
12 | 12 | ||
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 8358dc144959..0f9e94537eee 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h | |||
@@ -619,7 +619,7 @@ static inline unsigned long pte_present(pte_t pte) | |||
619 | } | 619 | } |
620 | 620 | ||
621 | #define pte_accessible pte_accessible | 621 | #define pte_accessible pte_accessible |
622 | static inline unsigned long pte_accessible(pte_t a) | 622 | static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a) |
623 | { | 623 | { |
624 | return pte_val(a) & _PAGE_VALID; | 624 | return pte_val(a) & _PAGE_VALID; |
625 | } | 625 | } |
@@ -847,7 +847,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, | |||
847 | * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U | 847 | * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U |
848 | * and SUN4V pte layout, so this inline test is fine. | 848 | * and SUN4V pte layout, so this inline test is fine. |
849 | */ | 849 | */ |
850 | if (likely(mm != &init_mm) && pte_accessible(orig)) | 850 | if (likely(mm != &init_mm) && pte_accessible(mm, orig)) |
851 | tlb_batch_add(mm, addr, ptep, orig, fullmm); | 851 | tlb_batch_add(mm, addr, ptep, orig, fullmm); |
852 | } | 852 | } |
853 | 853 | ||
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 3d1999458709..bbc8b12fa443 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h | |||
@@ -452,9 +452,16 @@ static inline int pte_present(pte_t a) | |||
452 | } | 452 | } |
453 | 453 | ||
454 | #define pte_accessible pte_accessible | 454 | #define pte_accessible pte_accessible |
455 | static inline int pte_accessible(pte_t a) | 455 | static inline bool pte_accessible(struct mm_struct *mm, pte_t a) |
456 | { | 456 | { |
457 | return pte_flags(a) & _PAGE_PRESENT; | 457 | if (pte_flags(a) & _PAGE_PRESENT) |
458 | return true; | ||
459 | |||
460 | if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) && | ||
461 | mm_tlb_flush_pending(mm)) | ||
462 | return true; | ||
463 | |||
464 | return false; | ||
458 | } | 465 | } |
459 | 466 | ||
460 | static inline int pte_hidden(pte_t pte) | 467 | static inline int pte_hidden(pte_t pte) |
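The x86 hunk above (with the matching sparc prototype change earlier) makes pte_accessible() aware of NUMA-hinting ptes: a pte that has been turned into _PAGE_PROTNONE/_PAGE_NUMA may still be cached in remote TLBs until the deferred flush completes, so it has to be treated as accessible while mm_tlb_flush_pending() is true. The logic, restated as a free-standing sketch (bit values and names are illustrative, not the real x86 definitions):

    #include <stdbool.h>

    #define PAGE_PRESENT_STUB  0x001
    #define PAGE_PROTNONE_STUB 0x100
    #define PAGE_NUMA_STUB     0x200

    struct mm_stub { int tlb_flush_pending; };

    static bool tlb_flush_pending_stub(struct mm_stub *mm)
    {
            return mm->tlb_flush_pending != 0;
    }

    static bool pte_accessible_stub(struct mm_stub *mm, unsigned long pte_flags)
    {
            if (pte_flags & PAGE_PRESENT_STUB)
                    return true;

            /* A protnone/NUMA-hinting pte may still be live in other CPUs'
             * TLBs until the pending flush finishes. */
            if ((pte_flags & (PAGE_PROTNONE_STUB | PAGE_NUMA_STUB)) &&
                tlb_flush_pending_stub(mm))
                    return true;

            return false;
    }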
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index dc1ec0dff939..ea04b342c026 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
@@ -387,7 +387,8 @@ static void init_intel(struct cpuinfo_x86 *c) | |||
387 | set_cpu_cap(c, X86_FEATURE_PEBS); | 387 | set_cpu_cap(c, X86_FEATURE_PEBS); |
388 | } | 388 | } |
389 | 389 | ||
390 | if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush) | 390 | if (c->x86 == 6 && cpu_has_clflush && |
391 | (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47)) | ||
391 | set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR); | 392 | set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR); |
392 | 393 | ||
393 | #ifdef CONFIG_X86_64 | 394 | #ifdef CONFIG_X86_64 |
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c index dd74e46828c0..0596e8e0cc19 100644 --- a/arch/x86/mm/gup.c +++ b/arch/x86/mm/gup.c | |||
@@ -83,6 +83,12 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, | |||
83 | pte_t pte = gup_get_pte(ptep); | 83 | pte_t pte = gup_get_pte(ptep); |
84 | struct page *page; | 84 | struct page *page; |
85 | 85 | ||
86 | /* Similar to the PMD case, NUMA hinting must take slow path */ | ||
87 | if (pte_numa(pte)) { | ||
88 | pte_unmap(ptep); | ||
89 | return 0; | ||
90 | } | ||
91 | |||
86 | if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) { | 92 | if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) { |
87 | pte_unmap(ptep); | 93 | pte_unmap(ptep); |
88 | return 0; | 94 | return 0; |
@@ -167,6 +173,13 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, | |||
167 | if (pmd_none(pmd) || pmd_trans_splitting(pmd)) | 173 | if (pmd_none(pmd) || pmd_trans_splitting(pmd)) |
168 | return 0; | 174 | return 0; |
169 | if (unlikely(pmd_large(pmd))) { | 175 | if (unlikely(pmd_large(pmd))) { |
176 | /* | ||
177 | * NUMA hinting faults need to be handled in the GUP | ||
178 | * slowpath for accounting purposes and so that they | ||
179 | * can be serialised against THP migration. | ||
180 | */ | ||
181 | if (pmd_numa(pmd)) | ||
182 | return 0; | ||
170 | if (!gup_huge_pmd(pmd, addr, next, write, pages, nr)) | 183 | if (!gup_huge_pmd(pmd, addr, next, write, pages, nr)) |
171 | return 0; | 184 | return 0; |
172 | } else { | 185 | } else { |
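Finally, the gup.c hunks make the lockless fast-GUP walker refuse NUMA-hinting ptes and pmds so those faults go through the regular slow path, where they can be accounted and serialised against THP migration. Schematically (names are placeholders, not the kernel's):

    #include <stdbool.h>

    struct pte_stub { bool numa; bool present; bool special; };

    /* Return 0 to make the caller fall back to the slow get_user_pages() path. */
    static int gup_pte_fast_stub(struct pte_stub pte)
    {
            /* NUMA hinting faults must take the slow path so they are
             * accounted and handled like ordinary faults. */
            if (pte.numa)
                    return 0;

            if (!pte.present || pte.special)
                    return 0;

            /* ... grab a reference on the page and continue the fast walk ... */
            return 1;
    }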