-rw-r--r--  arch/arc/Makefile | 2
-rw-r--r--  arch/arc/include/asm/cache.h | 2
-rw-r--r--  arch/arc/kernel/unwind.c | 28
-rw-r--r--  arch/arc/mm/highmem.c | 4
-rw-r--r--  arch/arm/boot/dts/imx6q-gw5400-a.dts | 2
-rw-r--r--  arch/arm/boot/dts/imx6qdl-gw51xx.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/imx6qdl-gw52xx.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/imx6qdl-gw53xx.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/imx6qdl-gw54xx.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/imx6qdl-sabreauto.dtsi | 6
-rw-r--r--  arch/arm/boot/dts/omap4-duovero-parlor.dts | 4
-rw-r--r--  arch/arm/boot/dts/sun6i-a31s-primo81.dts | 1
-rw-r--r--  arch/arm/boot/dts/tegra124-nyan.dtsi | 2
-rw-r--r--  arch/arm/mach-omap2/Kconfig | 2
-rw-r--r--  arch/arm/mach-omap2/timer.c | 6
-rw-r--r--  arch/parisc/kernel/signal.c | 64
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 6
-rw-r--r--  arch/s390/kernel/dis.c | 17
-rw-r--r--  arch/sparc/include/asm/elf_64.h | 1
-rw-r--r--  arch/sparc/include/uapi/asm/unistd.h | 3
-rw-r--r--  arch/sparc/kernel/head_64.S | 13
-rw-r--r--  arch/sparc/kernel/perf_event.c | 11
-rw-r--r--  arch/sparc/kernel/rtrap_64.S | 8
-rw-r--r--  arch/sparc/kernel/setup_64.c | 9
-rw-r--r--  arch/sparc/kernel/systbls_32.S | 2
-rw-r--r--  arch/sparc/kernel/systbls_64.S | 4
-rw-r--r--  arch/sparc/lib/NG2copy_from_user.S | 8
-rw-r--r--  arch/sparc/lib/NG2copy_to_user.S | 8
-rw-r--r--  arch/sparc/lib/NG2memcpy.S | 118
-rw-r--r--  arch/sparc/lib/NG4copy_from_user.S | 8
-rw-r--r--  arch/sparc/lib/NG4copy_to_user.S | 8
-rw-r--r--  arch/sparc/lib/NG4memcpy.S | 40
-rw-r--r--  arch/sparc/lib/U1copy_from_user.S | 8
-rw-r--r--  arch/sparc/lib/U1copy_to_user.S | 8
-rw-r--r--  arch/sparc/lib/U1memcpy.S | 48
-rw-r--r--  arch/sparc/lib/U3copy_from_user.S | 8
-rw-r--r--  arch/sparc/lib/U3copy_to_user.S | 8
-rw-r--r--  arch/sparc/lib/U3memcpy.S | 86
-rw-r--r--  arch/x86/kvm/cpuid.h | 8
-rw-r--r--  arch/x86/kvm/mtrr.c | 25
-rw-r--r--  arch/x86/kvm/svm.c | 4
-rw-r--r--  arch/x86/kvm/vmx.c | 7
-rw-r--r--  arch/x86/kvm/x86.c | 12
-rw-r--r--  arch/x86/um/signal.c | 2
-rw-r--r--  block/blk-core.c | 4
-rw-r--r--  drivers/acpi/processor_driver.c | 3
-rw-r--r--  drivers/block/null_blk.c | 6
-rw-r--r--  drivers/bus/sunxi-rsb.c | 8
-rw-r--r--  drivers/cpufreq/scpi-cpufreq.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 63
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 28
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 111
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 66
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c | 1
-rw-r--r--  drivers/md/md.c | 11
-rw-r--r--  drivers/memory/fsl_ifc.c | 1
-rw-r--r--  drivers/mtd/ubi/debug.c | 2
-rw-r--r--  drivers/mtd/ubi/io.c | 2
-rw-r--r--  drivers/mtd/ubi/wl.c | 53
-rw-r--r--  drivers/nvme/host/pci.c | 20
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 4
-rw-r--r--  drivers/s390/virtio/virtio_ccw.c | 62
-rw-r--r--  drivers/tty/serial/sunhv.c | 12
-rw-r--r--  fs/nfsd/nfs4layouts.c | 2
-rw-r--r--  scripts/recordmcount.c | 137
-rw-r--r--  sound/pci/hda/hda_intel.c | 34
-rw-r--r--  sound/pci/hda/patch_realtek.c | 11
-rw-r--r--  sound/soc/codecs/es8328.c | 25
-rw-r--r--  sound/soc/codecs/es8328.h | 1
-rw-r--r--  sound/soc/codecs/sgtl5000.c | 1
-rw-r--r--  sound/soc/codecs/wm8974.c | 1
-rw-r--r--  sound/soc/davinci/davinci-mcasp.c | 4
-rw-r--r--  sound/soc/fsl/fsl_sai.c | 18
-rw-r--r--  sound/soc/rockchip/rockchip_spdif.c | 6
-rw-r--r--  sound/soc/rockchip/rockchip_spdif.h | 2
-rw-r--r--  virt/kvm/arm/vgic.c | 2
82 files changed, 934 insertions, 404 deletions
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index cf0cf34eeb24..aeb19021099e 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -81,7 +81,7 @@ endif
 LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
 
 # Modules with short calls might break for calls into builtin-kernel
-KBUILD_CFLAGS_MODULE += -mlong-calls
+KBUILD_CFLAGS_MODULE += -mlong-calls -mno-millicode
 
 # Finally dump eveything into kernel build system
 KBUILD_CFLAGS += $(cflags-y)
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index abf06e81c929..210ef3e72332 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -62,9 +62,7 @@ extern int ioc_exists;
 #define ARC_REG_IC_IVIC    0x10
 #define ARC_REG_IC_CTRL    0x11
 #define ARC_REG_IC_IVIL    0x19
-#if defined(CONFIG_ARC_MMU_V3) || defined(CONFIG_ARC_MMU_V4)
 #define ARC_REG_IC_PTAG    0x1E
-#endif
 #define ARC_REG_IC_PTAG_HI 0x1F
 
 /* Bit val in IC_CTRL */
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index cf2828ab0905..5eb707640e9c 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -293,13 +293,13 @@ static void init_unwind_hdr(struct unwind_table *table,
 		const u32 *cie = cie_for_fde(fde, table);
 		signed ptrType;
 
-		if (cie == &not_fde) /* only process FDE here */
+		if (cie == &not_fde)
 			continue;
 		if (cie == NULL || cie == &bad_cie)
-			continue; /* say FDE->CIE.version != 1 */
+			goto ret_err;
 		ptrType = fde_pointer_type(cie);
 		if (ptrType < 0)
-			continue;
+			goto ret_err;
 
 		ptr = (const u8 *)(fde + 2);
 		if (!read_pointer(&ptr, (const u8 *)(fde + 1) + *fde,
@@ -315,14 +315,14 @@ static void init_unwind_hdr(struct unwind_table *table,
 	}
 
 	if (tableSize || !n)
-		return;
+		goto ret_err;
 
 	hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int)
 		+ 2 * n * sizeof(unsigned long);
 
 	header = alloc(hdrSize);
 	if (!header)
-		return;
+		goto ret_err;
 
 	header->version = 1;
 	header->eh_frame_ptr_enc = DW_EH_PE_abs | DW_EH_PE_native;
@@ -343,10 +343,6 @@ static void init_unwind_hdr(struct unwind_table *table,
 
 		if (fde[1] == 0xffffffff)
 			continue; /* this is a CIE */
-
-		if (*(u8 *)(cie + 2) != 1)
-			continue; /* FDE->CIE.version not supported */
-
 		ptr = (const u8 *)(fde + 2);
 		header->table[n].start = read_pointer(&ptr,
 						      (const u8 *)(fde + 1) +
@@ -365,6 +361,10 @@ static void init_unwind_hdr(struct unwind_table *table,
 	table->hdrsz = hdrSize;
 	smp_wmb();
 	table->header = (const void *)header;
+	return;
+
+ret_err:
+	panic("Attention !!! Dwarf FDE parsing errors\n");;
 }
 
 #ifdef CONFIG_MODULES
@@ -523,8 +523,7 @@ static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *table)
 
 	if (*cie <= sizeof(*cie) + 4 || *cie >= fde[1] - sizeof(*fde)
 	    || (*cie & (sizeof(*cie) - 1))
-	    || (cie[1] != 0xffffffff)
-	    || ( *(u8 *)(cie + 2) != 1)) /* version 1 supported */
+	    || (cie[1] != 0xffffffff))
 		return NULL; /* this is not a (valid) CIE */
 	return cie;
 }
@@ -605,9 +604,6 @@ static signed fde_pointer_type(const u32 *cie)
 	const u8 *ptr = (const u8 *)(cie + 2);
 	unsigned version = *ptr;
 
-	if (version != 1)
-		return -1; /* unsupported */
-
 	if (*++ptr) {
 		const char *aug;
 		const u8 *end = (const u8 *)(cie + 1) + *cie;
@@ -1019,9 +1015,7 @@ int arc_unwind(struct unwind_frame_info *frame)
 			ptr = (const u8 *)(cie + 2);
 			end = (const u8 *)(cie + 1) + *cie;
 			frame->call_frame = 1;
-			if ((state.version = *ptr) != 1)
-				cie = NULL; /* unsupported version */
-			else if (*++ptr) {
+			if (*++ptr) {
 				/* check if augmentation size is first (thus present) */
 				if (*ptr == 'z') {
 					while (++ptr < end && *ptr) {
diff --git a/arch/arc/mm/highmem.c b/arch/arc/mm/highmem.c
index 065ee6bfa82a..92dd92cad7f9 100644
--- a/arch/arc/mm/highmem.c
+++ b/arch/arc/mm/highmem.c
@@ -111,7 +111,7 @@ void __kunmap_atomic(void *kv)
 }
 EXPORT_SYMBOL(__kunmap_atomic);
 
-noinline pte_t *alloc_kmap_pgtable(unsigned long kvaddr)
+static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
 {
 	pgd_t *pgd_k;
 	pud_t *pud_k;
@@ -127,7 +127,7 @@ noinline pte_t *alloc_kmap_pgtable(unsigned long kvaddr)
 	return pte_k;
 }
 
-void kmap_init(void)
+void __init kmap_init(void)
 {
 	/* Due to recursive include hell, we can't do this in processor.h */
 	BUILD_BUG_ON(PAGE_OFFSET < (VMALLOC_END + FIXMAP_SIZE + PKMAP_SIZE));
diff --git a/arch/arm/boot/dts/imx6q-gw5400-a.dts b/arch/arm/boot/dts/imx6q-gw5400-a.dts
index 58adf176425a..a51834e1dd27 100644
--- a/arch/arm/boot/dts/imx6q-gw5400-a.dts
+++ b/arch/arm/boot/dts/imx6q-gw5400-a.dts
@@ -154,7 +154,7 @@
 &fec {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_enet>;
-	phy-mode = "rgmii";
+	phy-mode = "rgmii-id";
 	phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_HIGH>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
index 7b31fdb79ced..dc0cebfe22d7 100644
--- a/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
@@ -94,7 +94,7 @@
 &fec {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_enet>;
-	phy-mode = "rgmii";
+	phy-mode = "rgmii-id";
 	phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
index 1b66328a8498..18cd4114a23e 100644
--- a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
@@ -154,7 +154,7 @@
 &fec {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_enet>;
-	phy-mode = "rgmii";
+	phy-mode = "rgmii-id";
 	phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
index 7c51839ff934..eea90f37bbb8 100644
--- a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
@@ -155,7 +155,7 @@
 &fec {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_enet>;
-	phy-mode = "rgmii";
+	phy-mode = "rgmii-id";
 	phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
index 929e0b37bd9e..6c11a2ae35ef 100644
--- a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
@@ -145,7 +145,7 @@
 &fec {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_enet>;
-	phy-mode = "rgmii";
+	phy-mode = "rgmii-id";
 	phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
index 8263fc18a7d9..d354d406954d 100644
--- a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
@@ -113,14 +113,14 @@
 &clks {
 	assigned-clocks = <&clks IMX6QDL_PLL4_BYPASS_SRC>,
 			  <&clks IMX6QDL_PLL4_BYPASS>,
-			  <&clks IMX6QDL_CLK_PLL4_POST_DIV>,
 			  <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
-			  <&clks IMX6QDL_CLK_LDB_DI1_SEL>;
+			  <&clks IMX6QDL_CLK_LDB_DI1_SEL>,
+			  <&clks IMX6QDL_CLK_PLL4_POST_DIV>;
 	assigned-clock-parents = <&clks IMX6QDL_CLK_LVDS2_IN>,
 				 <&clks IMX6QDL_PLL4_BYPASS_SRC>,
 				 <&clks IMX6QDL_CLK_PLL3_USB_OTG>,
 				 <&clks IMX6QDL_CLK_PLL3_USB_OTG>;
-	assigned-clock-rates = <0>, <0>, <24576000>;
+	assigned-clock-rates = <0>, <0>, <0>, <0>, <24576000>;
 };
 
 &ecspi1 {
diff --git a/arch/arm/boot/dts/omap4-duovero-parlor.dts b/arch/arm/boot/dts/omap4-duovero-parlor.dts
index 1a78f013f37a..b75f7b2b7c4a 100644
--- a/arch/arm/boot/dts/omap4-duovero-parlor.dts
+++ b/arch/arm/boot/dts/omap4-duovero-parlor.dts
@@ -189,3 +189,7 @@
 	};
 };
 
+&uart3 {
+	interrupts-extended = <&wakeupgen GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH
+			       &omap4_pmx_core OMAP4_UART3_RX>;
+};
diff --git a/arch/arm/boot/dts/sun6i-a31s-primo81.dts b/arch/arm/boot/dts/sun6i-a31s-primo81.dts
index 2d4250b1faf8..68b479b8772c 100644
--- a/arch/arm/boot/dts/sun6i-a31s-primo81.dts
+++ b/arch/arm/boot/dts/sun6i-a31s-primo81.dts
@@ -83,6 +83,7 @@
 		reg = <0x5d>;
 		interrupt-parent = <&pio>;
 		interrupts = <0 3 IRQ_TYPE_LEVEL_HIGH>; /* PA3 */
+		touchscreen-swapped-x-y;
 	};
 };
 
diff --git a/arch/arm/boot/dts/tegra124-nyan.dtsi b/arch/arm/boot/dts/tegra124-nyan.dtsi
index 40c23a0b7cfc..ec1aa64ded68 100644
--- a/arch/arm/boot/dts/tegra124-nyan.dtsi
+++ b/arch/arm/boot/dts/tegra124-nyan.dtsi
@@ -399,7 +399,7 @@
 
 		/* CPU DFLL clock */
 		clock@0,70110000 {
-			status = "okay";
+			status = "disabled";
 			vdd-cpu-supply = <&vdd_cpu>;
 			nvidia,i2c-fs-rate = <400000>;
 		};
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 4b4371db5799..0517f0c1581a 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -65,6 +65,8 @@ config SOC_AM43XX
 	select MACH_OMAP_GENERIC
 	select MIGHT_HAVE_CACHE_L2X0
 	select HAVE_ARM_SCU
+	select GENERIC_CLOCKEVENTS_BROADCAST
+	select HAVE_ARM_TWD
 
 config SOC_DRA7XX
 	bool "TI DRA7XX"
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index b18ebbefae09..f86692dbcfd5 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -320,6 +320,12 @@ static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer,
 	return r;
 }
 
+#if !defined(CONFIG_SMP) && defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+void tick_broadcast(const struct cpumask *mask)
+{
+}
+#endif
+
 static void __init omap2_gp_clockevent_init(int gptimer_id,
 					    const char *fck_source,
 					    const char *property)
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index dc1ea796fd60..2264f68f3c2f 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -435,6 +435,55 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs, int in_syscall)
 						regs->gr[28]);
 }
 
+/*
+ * Check how the syscall number gets loaded into %r20 within
+ * the delay branch in userspace and adjust as needed.
+ */
+
+static void check_syscallno_in_delay_branch(struct pt_regs *regs)
+{
+	u32 opcode, source_reg;
+	u32 __user *uaddr;
+	int err;
+
+	/* Usually we don't have to restore %r20 (the system call number)
+	 * because it gets loaded in the delay slot of the branch external
+	 * instruction via the ldi instruction.
+	 * In some cases a register-to-register copy instruction might have
+	 * been used instead, in which case we need to copy the syscall
+	 * number into the source register before returning to userspace.
+	 */
+
+	/* A syscall is just a branch, so all we have to do is fiddle the
+	 * return pointer so that the ble instruction gets executed again.
+	 */
+	regs->gr[31] -= 8; /* delayed branching */
+
+	/* Get assembler opcode of code in delay branch */
+	uaddr = (unsigned int *) ((regs->gr[31] & ~3) + 4);
+	err = get_user(opcode, uaddr);
+	if (err)
+		return;
+
+	/* Check if delay branch uses "ldi int,%r20" */
+	if ((opcode & 0xffff0000) == 0x34140000)
+		return;	/* everything ok, just return */
+
+	/* Check if delay branch uses "nop" */
+	if (opcode == INSN_NOP)
+		return;
+
+	/* Check if delay branch uses "copy %rX,%r20" */
+	if ((opcode & 0xffe0ffff) == 0x08000254) {
+		source_reg = (opcode >> 16) & 31;
+		regs->gr[source_reg] = regs->gr[20];
+		return;
+	}
+
+	pr_warn("syscall restart: %s (pid %d): unexpected opcode 0x%08x\n",
+		current->comm, task_pid_nr(current), opcode);
+}
+
 static inline void
 syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
 {
@@ -457,10 +506,7 @@ syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
 		}
 		/* fallthrough */
 	case -ERESTARTNOINTR:
-		/* A syscall is just a branch, so all
-		 * we have to do is fiddle the return pointer.
-		 */
-		regs->gr[31] -= 8; /* delayed branching */
+		check_syscallno_in_delay_branch(regs);
 		break;
 	}
 }
@@ -510,15 +556,9 @@ insert_restart_trampoline(struct pt_regs *regs)
 	}
 	case -ERESTARTNOHAND:
 	case -ERESTARTSYS:
-	case -ERESTARTNOINTR: {
-		/* Hooray for delayed branching.  We don't
-		 * have to restore %r20 (the system call
-		 * number) because it gets loaded in the delay
-		 * slot of the branch external instruction.
-		 */
-		regs->gr[31] -= 8;
+	case -ERESTARTNOINTR:
+		check_syscallno_in_delay_branch(regs);
 		return;
-	}
 	default:
 		break;
 	}
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 54b45b73195f..a7352b59e6f9 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -224,6 +224,12 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
 
 static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
 {
+	/*
+	 * Check for illegal transactional state bit combination
+	 * and if we find it, force the TS field to a safe state.
+	 */
+	if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
+		msr &= ~MSR_TS_MASK;
 	vcpu->arch.shregs.msr = msr;
 	kvmppc_end_cede(vcpu);
 }
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 8140d10c6785..6e72961608f0 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -1920,16 +1920,23 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
 		}
 		if (separator)
 			ptr += sprintf(ptr, "%c", separator);
+		/*
+		 * Use four '%' characters below because of the
+		 * following two conversions:
+		 *
+		 *  1) sprintf: %%%%r -> %%r
+		 *  2) printk : %%r   -> %r
+		 */
 		if (operand->flags & OPERAND_GPR)
-			ptr += sprintf(ptr, "%%r%i", value);
+			ptr += sprintf(ptr, "%%%%r%i", value);
 		else if (operand->flags & OPERAND_FPR)
-			ptr += sprintf(ptr, "%%f%i", value);
+			ptr += sprintf(ptr, "%%%%f%i", value);
 		else if (operand->flags & OPERAND_AR)
-			ptr += sprintf(ptr, "%%a%i", value);
+			ptr += sprintf(ptr, "%%%%a%i", value);
 		else if (operand->flags & OPERAND_CR)
-			ptr += sprintf(ptr, "%%c%i", value);
+			ptr += sprintf(ptr, "%%%%c%i", value);
 		else if (operand->flags & OPERAND_VR)
-			ptr += sprintf(ptr, "%%v%i", value);
+			ptr += sprintf(ptr, "%%%%v%i", value);
 		else if (operand->flags & OPERAND_PCREL)
 			ptr += sprintf(ptr, "%lx", (signed int) value
 				      + addr);
diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
index 370ca1e71ffb..93310837c2df 100644
--- a/arch/sparc/include/asm/elf_64.h
+++ b/arch/sparc/include/asm/elf_64.h
@@ -95,6 +95,7 @@
  * really available. So we simply advertise only "crypto" support.
  */
 #define HWCAP_SPARC_CRYPTO	0x04000000 /* CRYPTO insns available */
+#define HWCAP_SPARC_ADI	0x08000000 /* ADI available */
 
 #define CORE_DUMP_USE_REGSET
 
diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h
index efe9479f837b..f31a124a8497 100644
--- a/arch/sparc/include/uapi/asm/unistd.h
+++ b/arch/sparc/include/uapi/asm/unistd.h
@@ -417,8 +417,9 @@
 #define __NR_bpf		349
 #define __NR_execveat		350
 #define __NR_membarrier		351
+#define __NR_userfaultfd	352
 
-#define NR_syscalls		352
+#define NR_syscalls		353
 
 /* Bitmask values returned from kern_features system call. */
 #define KERN_FEATURE_MIXED_MODE_STACK	0x00000001
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 3d61fcae7ee3..f2d30cab5b3f 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -946,6 +946,12 @@ ENTRY(__retl_one)
 	 mov	1, %o0
 ENDPROC(__retl_one)
 
+ENTRY(__retl_one_fp)
+	VISExitHalf
+	retl
+	 mov	1, %o0
+ENDPROC(__retl_one_fp)
+
 ENTRY(__ret_one_asi)
 	wr	%g0, ASI_AIUS, %asi
 	ret
@@ -958,6 +964,13 @@ ENTRY(__retl_one_asi)
 	 mov	1, %o0
 ENDPROC(__retl_one_asi)
 
+ENTRY(__retl_one_asi_fp)
+	wr	%g0, ASI_AIUS, %asi
+	VISExitHalf
+	retl
+	 mov	1, %o0
+ENDPROC(__retl_one_asi_fp)
+
 ENTRY(__retl_o1)
 	retl
 	 mov	%o1, %o0
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 3091267c5cc3..6596f66ce112 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1828,11 +1828,18 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
 void
 perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
+	u64 saved_fault_address = current_thread_info()->fault_address;
+	u8 saved_fault_code = get_thread_fault_code();
+	mm_segment_t old_fs;
+
 	perf_callchain_store(entry, regs->tpc);
 
 	if (!current->mm)
 		return;
 
+	old_fs = get_fs();
+	set_fs(USER_DS);
+
 	flushw_user();
 
 	pagefault_disable();
@@ -1843,4 +1850,8 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 		perf_callchain_user_64(entry, regs);
 
 	pagefault_enable();
+
+	set_fs(old_fs);
+	set_thread_fault_code(saved_fault_code);
+	current_thread_info()->fault_address = saved_fault_address;
 }
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index 39f0c662f4c8..d08bdaffdbfc 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -73,7 +73,13 @@ rtrap_nmi: ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
 		andn	%l1, %l4, %l1
 		srl	%l4, 20, %l4
 		ba,pt	%xcc, rtrap_no_irq_enable
-		 wrpr	%l4, %pil
+		 nop
+		/* Do not actually set the %pil here. We will do that
+		 * below after we clear PSTATE_IE in the %pstate register.
+		 * If we re-enable interrupts here, we can recurse down
+		 * the hardirq stack potentially endlessly, causing a
+		 * stack overflow.
+		 */
 
 	.align	64
 	.globl	rtrap_irq, rtrap, irqsz_patchme, rtrap_xcall
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index f7b261749383..f3185e2b028b 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -380,7 +380,8 @@ static const char *hwcaps[] = {
 	 */
 	"mul32", "div32", "fsmuld", "v8plus", "popc", "vis", "vis2",
 	"ASIBlkInit", "fmaf", "vis3", "hpc", "random", "trans", "fjfmau",
-	"ima", "cspare", "pause", "cbcond",
+	"ima", "cspare", "pause", "cbcond", NULL /*reserved for crypto */,
+	"adp",
 };
 
 static const char *crypto_hwcaps[] = {
@@ -396,7 +397,7 @@ void cpucap_info(struct seq_file *m)
 	seq_puts(m, "cpucaps\t\t: ");
 	for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
 		unsigned long bit = 1UL << i;
-		if (caps & bit) {
+		if (hwcaps[i] && (caps & bit)) {
 			seq_printf(m, "%s%s",
 				   printed ? "," : "", hwcaps[i]);
 			printed++;
@@ -450,7 +451,7 @@ static void __init report_hwcaps(unsigned long caps)
 
 	for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
 		unsigned long bit = 1UL << i;
-		if (caps & bit)
+		if (hwcaps[i] && (caps & bit))
 			report_one_hwcap(&printed, hwcaps[i]);
 	}
 	if (caps & HWCAP_SPARC_CRYPTO)
@@ -485,7 +486,7 @@ static unsigned long __init mdesc_cpu_hwcap_list(void)
 			for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
 				unsigned long bit = 1UL << i;
 
-				if (!strcmp(prop, hwcaps[i])) {
+				if (hwcaps[i] && !strcmp(prop, hwcaps[i])) {
 					caps |= bit;
 					break;
 				}
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index cc23b62b6e38..78e80293cb6d 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -87,4 +87,4 @@ sys_call_table:
 /*335*/	.long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
 /*340*/	.long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
 /*345*/	.long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
-/*350*/	.long sys_execveat, sys_membarrier
+/*350*/	.long sys_execveat, sys_membarrier, sys_userfaultfd
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index f229468a7479..2549c2c3ec2f 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -88,7 +88,7 @@ sys_call_table32:
 	.word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
 /*340*/	.word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
 	.word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
-/*350*/	.word sys32_execveat, sys_membarrier
+/*350*/	.word sys32_execveat, sys_membarrier, sys_userfaultfd
 
 #endif /* CONFIG_COMPAT */
 
@@ -168,4 +168,4 @@ sys_call_table:
 	.word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
 /*340*/	.word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
 	.word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
-/*350*/	.word sys64_execveat, sys_membarrier
+/*350*/	.word sys64_execveat, sys_membarrier, sys_userfaultfd
diff --git a/arch/sparc/lib/NG2copy_from_user.S b/arch/sparc/lib/NG2copy_from_user.S
index 119ccb9a54f4..d5242b8c4f94 100644
--- a/arch/sparc/lib/NG2copy_from_user.S
+++ b/arch/sparc/lib/NG2copy_from_user.S
@@ -11,6 +11,14 @@
 	.text; \
 	.align 4;
 
+#define EX_LD_FP(x) \
+98:	x; \
+	.section __ex_table,"a";\
+	.align 4; \
+	.word 98b, __retl_one_asi_fp;\
+	.text; \
+	.align 4;
+
 #ifndef ASI_AIUS
 #define ASI_AIUS	0x11
 #endif
diff --git a/arch/sparc/lib/NG2copy_to_user.S b/arch/sparc/lib/NG2copy_to_user.S
index 7fe1ccefd9d0..4e962d993b10 100644
--- a/arch/sparc/lib/NG2copy_to_user.S
+++ b/arch/sparc/lib/NG2copy_to_user.S
@@ -11,6 +11,14 @@
 	.text; \
 	.align 4;
 
+#define EX_ST_FP(x) \
+98:	x; \
+	.section __ex_table,"a";\
+	.align 4; \
+	.word 98b, __retl_one_asi_fp;\
+	.text; \
+	.align 4;
+
 #ifndef ASI_AIUS
 #define ASI_AIUS	0x11
 #endif
diff --git a/arch/sparc/lib/NG2memcpy.S b/arch/sparc/lib/NG2memcpy.S
index 30eee6e8a81b..d5f585df2f3f 100644
--- a/arch/sparc/lib/NG2memcpy.S
+++ b/arch/sparc/lib/NG2memcpy.S
@@ -34,10 +34,16 @@
 #ifndef EX_LD
 #define EX_LD(x)	x
 #endif
+#ifndef EX_LD_FP
+#define EX_LD_FP(x)	x
+#endif
 
 #ifndef EX_ST
 #define EX_ST(x)	x
 #endif
+#ifndef EX_ST_FP
+#define EX_ST_FP(x)	x
+#endif
 
 #ifndef EX_RETVAL
 #define EX_RETVAL(x)	x
@@ -134,40 +140,40 @@
134 fsrc2 %x6, %f12; \ 140 fsrc2 %x6, %f12; \
135 fsrc2 %x7, %f14; 141 fsrc2 %x7, %f14;
136#define FREG_LOAD_1(base, x0) \ 142#define FREG_LOAD_1(base, x0) \
137 EX_LD(LOAD(ldd, base + 0x00, %x0)) 143 EX_LD_FP(LOAD(ldd, base + 0x00, %x0))
138#define FREG_LOAD_2(base, x0, x1) \ 144#define FREG_LOAD_2(base, x0, x1) \
139 EX_LD(LOAD(ldd, base + 0x00, %x0)); \ 145 EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
140 EX_LD(LOAD(ldd, base + 0x08, %x1)); 146 EX_LD_FP(LOAD(ldd, base + 0x08, %x1));
141#define FREG_LOAD_3(base, x0, x1, x2) \ 147#define FREG_LOAD_3(base, x0, x1, x2) \
142 EX_LD(LOAD(ldd, base + 0x00, %x0)); \ 148 EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
143 EX_LD(LOAD(ldd, base + 0x08, %x1)); \ 149 EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
144 EX_LD(LOAD(ldd, base + 0x10, %x2)); 150 EX_LD_FP(LOAD(ldd, base + 0x10, %x2));
145#define FREG_LOAD_4(base, x0, x1, x2, x3) \ 151#define FREG_LOAD_4(base, x0, x1, x2, x3) \
146 EX_LD(LOAD(ldd, base + 0x00, %x0)); \ 152 EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
147 EX_LD(LOAD(ldd, base + 0x08, %x1)); \ 153 EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
148 EX_LD(LOAD(ldd, base + 0x10, %x2)); \ 154 EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \
149 EX_LD(LOAD(ldd, base + 0x18, %x3)); 155 EX_LD_FP(LOAD(ldd, base + 0x18, %x3));
150#define FREG_LOAD_5(base, x0, x1, x2, x3, x4) \ 156#define FREG_LOAD_5(base, x0, x1, x2, x3, x4) \
151 EX_LD(LOAD(ldd, base + 0x00, %x0)); \ 157 EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
152 EX_LD(LOAD(ldd, base + 0x08, %x1)); \ 158 EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
153 EX_LD(LOAD(ldd, base + 0x10, %x2)); \ 159 EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \
154 EX_LD(LOAD(ldd, base + 0x18, %x3)); \ 160 EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \
155 EX_LD(LOAD(ldd, base + 0x20, %x4)); 161 EX_LD_FP(LOAD(ldd, base + 0x20, %x4));
156#define FREG_LOAD_6(base, x0, x1, x2, x3, x4, x5) \ 162#define FREG_LOAD_6(base, x0, x1, x2, x3, x4, x5) \
157 EX_LD(LOAD(ldd, base + 0x00, %x0)); \ 163 EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
158 EX_LD(LOAD(ldd, base + 0x08, %x1)); \ 164 EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
159 EX_LD(LOAD(ldd, base + 0x10, %x2)); \ 165 EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \
160 EX_LD(LOAD(ldd, base + 0x18, %x3)); \ 166 EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \
161 EX_LD(LOAD(ldd, base + 0x20, %x4)); \ 167 EX_LD_FP(LOAD(ldd, base + 0x20, %x4)); \
162 EX_LD(LOAD(ldd, base + 0x28, %x5)); 168 EX_LD_FP(LOAD(ldd, base + 0x28, %x5));
163#define FREG_LOAD_7(base, x0, x1, x2, x3, x4, x5, x6) \ 169#define FREG_LOAD_7(base, x0, x1, x2, x3, x4, x5, x6) \
164 EX_LD(LOAD(ldd, base + 0x00, %x0)); \ 170 EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
165 EX_LD(LOAD(ldd, base + 0x08, %x1)); \ 171 EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
166 EX_LD(LOAD(ldd, base + 0x10, %x2)); \ 172 EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \
167 EX_LD(LOAD(ldd, base + 0x18, %x3)); \ 173 EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \
168 EX_LD(LOAD(ldd, base + 0x20, %x4)); \ 174 EX_LD_FP(LOAD(ldd, base + 0x20, %x4)); \
169 EX_LD(LOAD(ldd, base + 0x28, %x5)); \ 175 EX_LD_FP(LOAD(ldd, base + 0x28, %x5)); \
170 EX_LD(LOAD(ldd, base + 0x30, %x6)); 176 EX_LD_FP(LOAD(ldd, base + 0x30, %x6));
171 177
172 .register %g2,#scratch 178 .register %g2,#scratch
173 .register %g3,#scratch 179 .register %g3,#scratch
@@ -275,11 +281,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
275 nop 281 nop
276 /* fall through for 0 < low bits < 8 */ 282 /* fall through for 0 < low bits < 8 */
277110: sub %o4, 64, %g2 283110: sub %o4, 64, %g2
278 EX_LD(LOAD_BLK(%g2, %f0)) 284 EX_LD_FP(LOAD_BLK(%g2, %f0))
2791: EX_ST(STORE_INIT(%g0, %o4 + %g3)) 2851: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
280 EX_LD(LOAD_BLK(%o4, %f16)) 286 EX_LD_FP(LOAD_BLK(%o4, %f16))
281 FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f14, f16) 287 FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f14, f16)
282 EX_ST(STORE_BLK(%f0, %o4 + %g3)) 288 EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
283 FREG_MOVE_8(f16, f18, f20, f22, f24, f26, f28, f30) 289 FREG_MOVE_8(f16, f18, f20, f22, f24, f26, f28, f30)
284 subcc %g1, 64, %g1 290 subcc %g1, 64, %g1
285 add %o4, 64, %o4 291 add %o4, 64, %o4
@@ -290,10 +296,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
290 296
291120: sub %o4, 56, %g2 297120: sub %o4, 56, %g2
292 FREG_LOAD_7(%g2, f0, f2, f4, f6, f8, f10, f12) 298 FREG_LOAD_7(%g2, f0, f2, f4, f6, f8, f10, f12)
2931: EX_ST(STORE_INIT(%g0, %o4 + %g3)) 2991: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
294 EX_LD(LOAD_BLK(%o4, %f16)) 300 EX_LD_FP(LOAD_BLK(%o4, %f16))
295 FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f16, f18) 301 FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f16, f18)
296 EX_ST(STORE_BLK(%f0, %o4 + %g3)) 302 EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
297 FREG_MOVE_7(f18, f20, f22, f24, f26, f28, f30) 303 FREG_MOVE_7(f18, f20, f22, f24, f26, f28, f30)
298 subcc %g1, 64, %g1 304 subcc %g1, 64, %g1
299 add %o4, 64, %o4 305 add %o4, 64, %o4
@@ -304,10 +310,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
304 310
305130: sub %o4, 48, %g2 311130: sub %o4, 48, %g2
306 FREG_LOAD_6(%g2, f0, f2, f4, f6, f8, f10) 312 FREG_LOAD_6(%g2, f0, f2, f4, f6, f8, f10)
3071: EX_ST(STORE_INIT(%g0, %o4 + %g3)) 3131: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
308 EX_LD(LOAD_BLK(%o4, %f16)) 314 EX_LD_FP(LOAD_BLK(%o4, %f16))
309 FREG_FROB(f0, f2, f4, f6, f8, f10, f16, f18, f20) 315 FREG_FROB(f0, f2, f4, f6, f8, f10, f16, f18, f20)
310 EX_ST(STORE_BLK(%f0, %o4 + %g3)) 316 EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
311 FREG_MOVE_6(f20, f22, f24, f26, f28, f30) 317 FREG_MOVE_6(f20, f22, f24, f26, f28, f30)
312 subcc %g1, 64, %g1 318 subcc %g1, 64, %g1
313 add %o4, 64, %o4 319 add %o4, 64, %o4
@@ -318,10 +324,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
318 324
319140: sub %o4, 40, %g2 325140: sub %o4, 40, %g2
320 FREG_LOAD_5(%g2, f0, f2, f4, f6, f8) 326 FREG_LOAD_5(%g2, f0, f2, f4, f6, f8)
3211: EX_ST(STORE_INIT(%g0, %o4 + %g3)) 3271: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
322 EX_LD(LOAD_BLK(%o4, %f16)) 328 EX_LD_FP(LOAD_BLK(%o4, %f16))
323 FREG_FROB(f0, f2, f4, f6, f8, f16, f18, f20, f22) 329 FREG_FROB(f0, f2, f4, f6, f8, f16, f18, f20, f22)
324 EX_ST(STORE_BLK(%f0, %o4 + %g3)) 330 EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
325 FREG_MOVE_5(f22, f24, f26, f28, f30) 331 FREG_MOVE_5(f22, f24, f26, f28, f30)
326 subcc %g1, 64, %g1 332 subcc %g1, 64, %g1
327 add %o4, 64, %o4 333 add %o4, 64, %o4
@@ -332,10 +338,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
332 338
333150: sub %o4, 32, %g2 339150: sub %o4, 32, %g2
334 FREG_LOAD_4(%g2, f0, f2, f4, f6) 340 FREG_LOAD_4(%g2, f0, f2, f4, f6)
3351: EX_ST(STORE_INIT(%g0, %o4 + %g3)) 3411: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
336 EX_LD(LOAD_BLK(%o4, %f16)) 342 EX_LD_FP(LOAD_BLK(%o4, %f16))
337 FREG_FROB(f0, f2, f4, f6, f16, f18, f20, f22, f24) 343 FREG_FROB(f0, f2, f4, f6, f16, f18, f20, f22, f24)
338 EX_ST(STORE_BLK(%f0, %o4 + %g3)) 344 EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
339 FREG_MOVE_4(f24, f26, f28, f30) 345 FREG_MOVE_4(f24, f26, f28, f30)
340 subcc %g1, 64, %g1 346 subcc %g1, 64, %g1
341 add %o4, 64, %o4 347 add %o4, 64, %o4
@@ -346,10 +352,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
346 352
347160: sub %o4, 24, %g2 353160: sub %o4, 24, %g2
348 FREG_LOAD_3(%g2, f0, f2, f4) 354 FREG_LOAD_3(%g2, f0, f2, f4)
3491: EX_ST(STORE_INIT(%g0, %o4 + %g3)) 3551: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
350 EX_LD(LOAD_BLK(%o4, %f16)) 356 EX_LD_FP(LOAD_BLK(%o4, %f16))
351 FREG_FROB(f0, f2, f4, f16, f18, f20, f22, f24, f26) 357 FREG_FROB(f0, f2, f4, f16, f18, f20, f22, f24, f26)
352 EX_ST(STORE_BLK(%f0, %o4 + %g3)) 358 EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
353 FREG_MOVE_3(f26, f28, f30) 359 FREG_MOVE_3(f26, f28, f30)
354 subcc %g1, 64, %g1 360 subcc %g1, 64, %g1
355 add %o4, 64, %o4 361 add %o4, 64, %o4
@@ -360,10 +366,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
360 366
361170: sub %o4, 16, %g2 367170: sub %o4, 16, %g2
362 FREG_LOAD_2(%g2, f0, f2) 368 FREG_LOAD_2(%g2, f0, f2)
3631: EX_ST(STORE_INIT(%g0, %o4 + %g3)) 3691: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
364 EX_LD(LOAD_BLK(%o4, %f16)) 370 EX_LD_FP(LOAD_BLK(%o4, %f16))
365 FREG_FROB(f0, f2, f16, f18, f20, f22, f24, f26, f28) 371 FREG_FROB(f0, f2, f16, f18, f20, f22, f24, f26, f28)
366 EX_ST(STORE_BLK(%f0, %o4 + %g3)) 372 EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
367 FREG_MOVE_2(f28, f30) 373 FREG_MOVE_2(f28, f30)
368 subcc %g1, 64, %g1 374 subcc %g1, 64, %g1
369 add %o4, 64, %o4 375 add %o4, 64, %o4
@@ -374,10 +380,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
374 380
375180: sub %o4, 8, %g2 381180: sub %o4, 8, %g2
376 FREG_LOAD_1(%g2, f0) 382 FREG_LOAD_1(%g2, f0)
3771: EX_ST(STORE_INIT(%g0, %o4 + %g3)) 3831: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
378 EX_LD(LOAD_BLK(%o4, %f16)) 384 EX_LD_FP(LOAD_BLK(%o4, %f16))
379 FREG_FROB(f0, f16, f18, f20, f22, f24, f26, f28, f30) 385 FREG_FROB(f0, f16, f18, f20, f22, f24, f26, f28, f30)
380 EX_ST(STORE_BLK(%f0, %o4 + %g3)) 386 EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
381 FREG_MOVE_1(f30) 387 FREG_MOVE_1(f30)
382 subcc %g1, 64, %g1 388 subcc %g1, 64, %g1
383 add %o4, 64, %o4 389 add %o4, 64, %o4
@@ -387,10 +393,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
387 nop 393 nop
388 394
389190: 395190:
3901: EX_ST(STORE_INIT(%g0, %o4 + %g3)) 3961: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
391 subcc %g1, 64, %g1 397 subcc %g1, 64, %g1
392 EX_LD(LOAD_BLK(%o4, %f0)) 398 EX_LD_FP(LOAD_BLK(%o4, %f0))
393 EX_ST(STORE_BLK(%f0, %o4 + %g3)) 399 EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
394 add %o4, 64, %o4 400 add %o4, 64, %o4
395 bne,pt %xcc, 1b 401 bne,pt %xcc, 1b
396 LOAD(prefetch, %o4 + 64, #one_read) 402 LOAD(prefetch, %o4 + 64, #one_read)
diff --git a/arch/sparc/lib/NG4copy_from_user.S b/arch/sparc/lib/NG4copy_from_user.S
index fd9f903ffa32..2e8ee7ad07a9 100644
--- a/arch/sparc/lib/NG4copy_from_user.S
+++ b/arch/sparc/lib/NG4copy_from_user.S
@@ -11,6 +11,14 @@
 	.text; \
 	.align 4;
 
+#define EX_LD_FP(x) \
+98:	x; \
+	.section __ex_table,"a";\
+	.align 4; \
+	.word 98b, __retl_one_asi_fp;\
+	.text; \
+	.align 4;
+
 #ifndef ASI_AIUS
 #define ASI_AIUS	0x11
 #endif
diff --git a/arch/sparc/lib/NG4copy_to_user.S b/arch/sparc/lib/NG4copy_to_user.S
index 9744c4540a8d..be0bf4590df8 100644
--- a/arch/sparc/lib/NG4copy_to_user.S
+++ b/arch/sparc/lib/NG4copy_to_user.S
@@ -11,6 +11,14 @@
 	.text; \
 	.align 4;
 
+#define EX_ST_FP(x) \
+98:	x; \
+	.section __ex_table,"a";\
+	.align 4; \
+	.word 98b, __retl_one_asi_fp;\
+	.text; \
+	.align 4;
+
 #ifndef ASI_AIUS
 #define ASI_AIUS	0x11
 #endif
diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S
index 83aeeb1dffdb..8e13ee1f4454 100644
--- a/arch/sparc/lib/NG4memcpy.S
+++ b/arch/sparc/lib/NG4memcpy.S
@@ -48,10 +48,16 @@
 #ifndef EX_LD
 #define EX_LD(x)	x
 #endif
+#ifndef EX_LD_FP
+#define EX_LD_FP(x)	x
+#endif
 
 #ifndef EX_ST
 #define EX_ST(x)	x
 #endif
+#ifndef EX_ST_FP
+#define EX_ST_FP(x)	x
+#endif
 
 #ifndef EX_RETVAL
 #define EX_RETVAL(x)	x
@@ -210,17 +216,17 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
210 sub %o2, %o4, %o2 216 sub %o2, %o4, %o2
211 alignaddr %o1, %g0, %g1 217 alignaddr %o1, %g0, %g1
212 add %o1, %o4, %o1 218 add %o1, %o4, %o1
213 EX_LD(LOAD(ldd, %g1 + 0x00, %f0)) 219 EX_LD_FP(LOAD(ldd, %g1 + 0x00, %f0))
2141: EX_LD(LOAD(ldd, %g1 + 0x08, %f2)) 2201: EX_LD_FP(LOAD(ldd, %g1 + 0x08, %f2))
215 subcc %o4, 0x40, %o4 221 subcc %o4, 0x40, %o4
216 EX_LD(LOAD(ldd, %g1 + 0x10, %f4)) 222 EX_LD_FP(LOAD(ldd, %g1 + 0x10, %f4))
217 EX_LD(LOAD(ldd, %g1 + 0x18, %f6)) 223 EX_LD_FP(LOAD(ldd, %g1 + 0x18, %f6))
218 EX_LD(LOAD(ldd, %g1 + 0x20, %f8)) 224 EX_LD_FP(LOAD(ldd, %g1 + 0x20, %f8))
219 EX_LD(LOAD(ldd, %g1 + 0x28, %f10)) 225 EX_LD_FP(LOAD(ldd, %g1 + 0x28, %f10))
220 EX_LD(LOAD(ldd, %g1 + 0x30, %f12)) 226 EX_LD_FP(LOAD(ldd, %g1 + 0x30, %f12))
221 EX_LD(LOAD(ldd, %g1 + 0x38, %f14)) 227 EX_LD_FP(LOAD(ldd, %g1 + 0x38, %f14))
222 faligndata %f0, %f2, %f16 228 faligndata %f0, %f2, %f16
223 EX_LD(LOAD(ldd, %g1 + 0x40, %f0)) 229 EX_LD_FP(LOAD(ldd, %g1 + 0x40, %f0))
224 faligndata %f2, %f4, %f18 230 faligndata %f2, %f4, %f18
225 add %g1, 0x40, %g1 231 add %g1, 0x40, %g1
226 faligndata %f4, %f6, %f20 232 faligndata %f4, %f6, %f20
@@ -229,14 +235,14 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
229 faligndata %f10, %f12, %f26 235 faligndata %f10, %f12, %f26
230 faligndata %f12, %f14, %f28 236 faligndata %f12, %f14, %f28
231 faligndata %f14, %f0, %f30 237 faligndata %f14, %f0, %f30
232 EX_ST(STORE(std, %f16, %o0 + 0x00)) 238 EX_ST_FP(STORE(std, %f16, %o0 + 0x00))
233 EX_ST(STORE(std, %f18, %o0 + 0x08)) 239 EX_ST_FP(STORE(std, %f18, %o0 + 0x08))
234 EX_ST(STORE(std, %f20, %o0 + 0x10)) 240 EX_ST_FP(STORE(std, %f20, %o0 + 0x10))
235 EX_ST(STORE(std, %f22, %o0 + 0x18)) 241 EX_ST_FP(STORE(std, %f22, %o0 + 0x18))
236 EX_ST(STORE(std, %f24, %o0 + 0x20)) 242 EX_ST_FP(STORE(std, %f24, %o0 + 0x20))
237 EX_ST(STORE(std, %f26, %o0 + 0x28)) 243 EX_ST_FP(STORE(std, %f26, %o0 + 0x28))
238 EX_ST(STORE(std, %f28, %o0 + 0x30)) 244 EX_ST_FP(STORE(std, %f28, %o0 + 0x30))
239 EX_ST(STORE(std, %f30, %o0 + 0x38)) 245 EX_ST_FP(STORE(std, %f30, %o0 + 0x38))
240 add %o0, 0x40, %o0 246 add %o0, 0x40, %o0
241 bne,pt %icc, 1b 247 bne,pt %icc, 1b
242 LOAD(prefetch, %g1 + 0x200, #n_reads_strong) 248 LOAD(prefetch, %g1 + 0x200, #n_reads_strong)
diff --git a/arch/sparc/lib/U1copy_from_user.S b/arch/sparc/lib/U1copy_from_user.S
index a6ae2ea04bf5..ecc5692fa2b4 100644
--- a/arch/sparc/lib/U1copy_from_user.S
+++ b/arch/sparc/lib/U1copy_from_user.S
@@ -11,6 +11,14 @@
 	.text; \
 	.align 4;
 
+#define EX_LD_FP(x) \
+98:	x; \
+	.section __ex_table,"a";\
+	.align 4; \
+	.word 98b, __retl_one_fp;\
+	.text; \
+	.align 4;
+
 #define FUNC_NAME		___copy_from_user
 #define LOAD(type,addr,dest)	type##a [addr] %asi, dest
 #define LOAD_BLK(addr,dest)	ldda [addr] ASI_BLK_AIUS, dest
diff --git a/arch/sparc/lib/U1copy_to_user.S b/arch/sparc/lib/U1copy_to_user.S
index f4b970eeb485..9eea392e44d4 100644
--- a/arch/sparc/lib/U1copy_to_user.S
+++ b/arch/sparc/lib/U1copy_to_user.S
@@ -11,6 +11,14 @@
 	.text; \
 	.align 4;
 
+#define EX_ST_FP(x) \
+98:	x; \
+	.section __ex_table,"a";\
+	.align 4; \
+	.word 98b, __retl_one_fp;\
+	.text; \
+	.align 4;
+
 #define FUNC_NAME		___copy_to_user
 #define STORE(type,src,addr)	type##a src, [addr] ASI_AIUS
 #define STORE_BLK(src,addr)	stda src, [addr] ASI_BLK_AIUS
diff --git a/arch/sparc/lib/U1memcpy.S b/arch/sparc/lib/U1memcpy.S
index b67142b7768e..3e6209ebb7d7 100644
--- a/arch/sparc/lib/U1memcpy.S
+++ b/arch/sparc/lib/U1memcpy.S
@@ -25,10 +25,16 @@
 #ifndef EX_LD
 #define EX_LD(x)	x
 #endif
+#ifndef EX_LD_FP
+#define EX_LD_FP(x)	x
+#endif
 
 #ifndef EX_ST
 #define EX_ST(x)	x
 #endif
+#ifndef EX_ST_FP
+#define EX_ST_FP(x)	x
+#endif
 
 #ifndef EX_RETVAL
 #define EX_RETVAL(x)	x
@@ -73,8 +79,8 @@
73 faligndata %f8, %f9, %f62; 79 faligndata %f8, %f9, %f62;
74 80
75#define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, len, jmptgt) \ 81#define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, len, jmptgt) \
76 EX_LD(LOAD_BLK(%src, %fdest)); \ 82 EX_LD_FP(LOAD_BLK(%src, %fdest)); \
77 EX_ST(STORE_BLK(%fsrc, %dest)); \ 83 EX_ST_FP(STORE_BLK(%fsrc, %dest)); \
78 add %src, 0x40, %src; \ 84 add %src, 0x40, %src; \
79 subcc %len, 0x40, %len; \ 85 subcc %len, 0x40, %len; \
80 be,pn %xcc, jmptgt; \ 86 be,pn %xcc, jmptgt; \
@@ -89,12 +95,12 @@
89 95
90#define DO_SYNC membar #Sync; 96#define DO_SYNC membar #Sync;
91#define STORE_SYNC(dest, fsrc) \ 97#define STORE_SYNC(dest, fsrc) \
92 EX_ST(STORE_BLK(%fsrc, %dest)); \ 98 EX_ST_FP(STORE_BLK(%fsrc, %dest)); \
93 add %dest, 0x40, %dest; \ 99 add %dest, 0x40, %dest; \
94 DO_SYNC 100 DO_SYNC
95 101
96#define STORE_JUMP(dest, fsrc, target) \ 102#define STORE_JUMP(dest, fsrc, target) \
97 EX_ST(STORE_BLK(%fsrc, %dest)); \ 103 EX_ST_FP(STORE_BLK(%fsrc, %dest)); \
98 add %dest, 0x40, %dest; \ 104 add %dest, 0x40, %dest; \
99 ba,pt %xcc, target; \ 105 ba,pt %xcc, target; \
100 nop; 106 nop;
@@ -103,7 +109,7 @@
103 subcc %left, 8, %left;\ 109 subcc %left, 8, %left;\
104 bl,pn %xcc, 95f; \ 110 bl,pn %xcc, 95f; \
105 faligndata %f0, %f1, %f48; \ 111 faligndata %f0, %f1, %f48; \
106 EX_ST(STORE(std, %f48, %dest)); \ 112 EX_ST_FP(STORE(std, %f48, %dest)); \
107 add %dest, 8, %dest; 113 add %dest, 8, %dest;
108 114
109#define UNEVEN_VISCHUNK_LAST(dest, f0, f1, left) \ 115#define UNEVEN_VISCHUNK_LAST(dest, f0, f1, left) \
@@ -160,8 +166,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
160 and %g2, 0x38, %g2 166 and %g2, 0x38, %g2
161 167
1621: subcc %g1, 0x1, %g1 1681: subcc %g1, 0x1, %g1
163 EX_LD(LOAD(ldub, %o1 + 0x00, %o3)) 169 EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3))
164 EX_ST(STORE(stb, %o3, %o1 + %GLOBAL_SPARE)) 170 EX_ST_FP(STORE(stb, %o3, %o1 + %GLOBAL_SPARE))
165 bgu,pt %XCC, 1b 171 bgu,pt %XCC, 1b
166 add %o1, 0x1, %o1 172 add %o1, 0x1, %o1
167 173
@@ -172,20 +178,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
172 be,pt %icc, 3f 178 be,pt %icc, 3f
173 alignaddr %o1, %g0, %o1 179 alignaddr %o1, %g0, %o1
174 180
175 EX_LD(LOAD(ldd, %o1, %f4)) 181 EX_LD_FP(LOAD(ldd, %o1, %f4))
1761: EX_LD(LOAD(ldd, %o1 + 0x8, %f6)) 1821: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6))
177 add %o1, 0x8, %o1 183 add %o1, 0x8, %o1
178 subcc %g2, 0x8, %g2 184 subcc %g2, 0x8, %g2
179 faligndata %f4, %f6, %f0 185 faligndata %f4, %f6, %f0
180 EX_ST(STORE(std, %f0, %o0)) 186 EX_ST_FP(STORE(std, %f0, %o0))
181 be,pn %icc, 3f 187 be,pn %icc, 3f
182 add %o0, 0x8, %o0 188 add %o0, 0x8, %o0
183 189
184 EX_LD(LOAD(ldd, %o1 + 0x8, %f4)) 190 EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4))
185 add %o1, 0x8, %o1 191 add %o1, 0x8, %o1
186 subcc %g2, 0x8, %g2 192 subcc %g2, 0x8, %g2
187 faligndata %f6, %f4, %f0 193 faligndata %f6, %f4, %f0
188 EX_ST(STORE(std, %f0, %o0)) 194 EX_ST_FP(STORE(std, %f0, %o0))
189 bne,pt %icc, 1b 195 bne,pt %icc, 1b
190 add %o0, 0x8, %o0 196 add %o0, 0x8, %o0
191 197
@@ -208,13 +214,13 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
208 add %g1, %GLOBAL_SPARE, %g1 214 add %g1, %GLOBAL_SPARE, %g1
209 subcc %o2, %g3, %o2 215 subcc %o2, %g3, %o2
210 216
211 EX_LD(LOAD_BLK(%o1, %f0)) 217 EX_LD_FP(LOAD_BLK(%o1, %f0))
212 add %o1, 0x40, %o1 218 add %o1, 0x40, %o1
213 add %g1, %g3, %g1 219 add %g1, %g3, %g1
214 EX_LD(LOAD_BLK(%o1, %f16)) 220 EX_LD_FP(LOAD_BLK(%o1, %f16))
215 add %o1, 0x40, %o1 221 add %o1, 0x40, %o1
216 sub %GLOBAL_SPARE, 0x80, %GLOBAL_SPARE 222 sub %GLOBAL_SPARE, 0x80, %GLOBAL_SPARE
217 EX_LD(LOAD_BLK(%o1, %f32)) 223 EX_LD_FP(LOAD_BLK(%o1, %f32))
218 add %o1, 0x40, %o1 224 add %o1, 0x40, %o1
219 225
220 /* There are 8 instances of the unrolled loop, 226 /* There are 8 instances of the unrolled loop,
@@ -426,28 +432,28 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
42662: FINISH_VISCHUNK(o0, f44, f46, g3) 43262: FINISH_VISCHUNK(o0, f44, f46, g3)
42763: UNEVEN_VISCHUNK_LAST(o0, f46, f0, g3) 43363: UNEVEN_VISCHUNK_LAST(o0, f46, f0, g3)
428 434
42993: EX_LD(LOAD(ldd, %o1, %f2)) 43593: EX_LD_FP(LOAD(ldd, %o1, %f2))
430 add %o1, 8, %o1 436 add %o1, 8, %o1
431 subcc %g3, 8, %g3 437 subcc %g3, 8, %g3
432 faligndata %f0, %f2, %f8 438 faligndata %f0, %f2, %f8
433 EX_ST(STORE(std, %f8, %o0)) 439 EX_ST_FP(STORE(std, %f8, %o0))
434 bl,pn %xcc, 95f 440 bl,pn %xcc, 95f
435 add %o0, 8, %o0 441 add %o0, 8, %o0
436 EX_LD(LOAD(ldd, %o1, %f0)) 442 EX_LD_FP(LOAD(ldd, %o1, %f0))
437 add %o1, 8, %o1 443 add %o1, 8, %o1
438 subcc %g3, 8, %g3 444 subcc %g3, 8, %g3
439 faligndata %f2, %f0, %f8 445 faligndata %f2, %f0, %f8
440 EX_ST(STORE(std, %f8, %o0)) 446 EX_ST_FP(STORE(std, %f8, %o0))
441 bge,pt %xcc, 93b 447 bge,pt %xcc, 93b
442 add %o0, 8, %o0 448 add %o0, 8, %o0
443 449
44495: brz,pt %o2, 2f 45095: brz,pt %o2, 2f
445 mov %g1, %o1 451 mov %g1, %o1
446 452
4471: EX_LD(LOAD(ldub, %o1, %o3)) 4531: EX_LD_FP(LOAD(ldub, %o1, %o3))
448 add %o1, 1, %o1 454 add %o1, 1, %o1
449 subcc %o2, 1, %o2 455 subcc %o2, 1, %o2
450 EX_ST(STORE(stb, %o3, %o0)) 456 EX_ST_FP(STORE(stb, %o3, %o0))
451 bne,pt %xcc, 1b 457 bne,pt %xcc, 1b
452 add %o0, 1, %o0 458 add %o0, 1, %o0
453 459
diff --git a/arch/sparc/lib/U3copy_from_user.S b/arch/sparc/lib/U3copy_from_user.S
index b1acd1331c33..88ad73d86fe4 100644
--- a/arch/sparc/lib/U3copy_from_user.S
+++ b/arch/sparc/lib/U3copy_from_user.S
@@ -11,6 +11,14 @@
 	.text; \
 	.align 4;
 
+#define EX_LD_FP(x) \
+98:	x; \
+	.section __ex_table,"a";\
+	.align 4; \
+	.word 98b, __retl_one_fp;\
+	.text; \
+	.align 4;
+
 #define FUNC_NAME		U3copy_from_user
 #define LOAD(type,addr,dest)	type##a [addr] %asi, dest
 #define EX_RETVAL(x)		0
diff --git a/arch/sparc/lib/U3copy_to_user.S b/arch/sparc/lib/U3copy_to_user.S
index ef1e493afdfa..845139d75537 100644
--- a/arch/sparc/lib/U3copy_to_user.S
+++ b/arch/sparc/lib/U3copy_to_user.S
@@ -11,6 +11,14 @@
 	.text; \
 	.align 4;
 
+#define EX_ST_FP(x) \
+98:	x; \
+	.section __ex_table,"a";\
+	.align 4; \
+	.word 98b, __retl_one_fp;\
+	.text; \
+	.align 4;
+
 #define FUNC_NAME		U3copy_to_user
 #define STORE(type,src,addr)	type##a src, [addr] ASI_AIUS
 #define STORE_BLK(src,addr)	stda src, [addr] ASI_BLK_AIUS
diff --git a/arch/sparc/lib/U3memcpy.S b/arch/sparc/lib/U3memcpy.S
index 7cae9cc6a204..491ee69e4995 100644
--- a/arch/sparc/lib/U3memcpy.S
+++ b/arch/sparc/lib/U3memcpy.S
@@ -24,10 +24,16 @@
 #ifndef EX_LD
 #define EX_LD(x)	x
 #endif
+#ifndef EX_LD_FP
+#define EX_LD_FP(x)	x
+#endif
 
 #ifndef EX_ST
 #define EX_ST(x)	x
 #endif
+#ifndef EX_ST_FP
+#define EX_ST_FP(x)	x
+#endif
 
 #ifndef EX_RETVAL
 #define EX_RETVAL(x)	x
@@ -120,8 +126,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
120 and %g2, 0x38, %g2 126 and %g2, 0x38, %g2
121 127
1221: subcc %g1, 0x1, %g1 1281: subcc %g1, 0x1, %g1
123 EX_LD(LOAD(ldub, %o1 + 0x00, %o3)) 129 EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3))
124 EX_ST(STORE(stb, %o3, %o1 + GLOBAL_SPARE)) 130 EX_ST_FP(STORE(stb, %o3, %o1 + GLOBAL_SPARE))
125 bgu,pt %XCC, 1b 131 bgu,pt %XCC, 1b
126 add %o1, 0x1, %o1 132 add %o1, 0x1, %o1
127 133
@@ -132,20 +138,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
132 be,pt %icc, 3f 138 be,pt %icc, 3f
133 alignaddr %o1, %g0, %o1 139 alignaddr %o1, %g0, %o1
134 140
135 EX_LD(LOAD(ldd, %o1, %f4)) 141 EX_LD_FP(LOAD(ldd, %o1, %f4))
1361: EX_LD(LOAD(ldd, %o1 + 0x8, %f6)) 1421: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6))
137 add %o1, 0x8, %o1 143 add %o1, 0x8, %o1
138 subcc %g2, 0x8, %g2 144 subcc %g2, 0x8, %g2
139 faligndata %f4, %f6, %f0 145 faligndata %f4, %f6, %f0
140 EX_ST(STORE(std, %f0, %o0)) 146 EX_ST_FP(STORE(std, %f0, %o0))
141 be,pn %icc, 3f 147 be,pn %icc, 3f
142 add %o0, 0x8, %o0 148 add %o0, 0x8, %o0
143 149
144 EX_LD(LOAD(ldd, %o1 + 0x8, %f4)) 150 EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4))
145 add %o1, 0x8, %o1 151 add %o1, 0x8, %o1
146 subcc %g2, 0x8, %g2 152 subcc %g2, 0x8, %g2
147 faligndata %f6, %f4, %f2 153 faligndata %f6, %f4, %f2
148 EX_ST(STORE(std, %f2, %o0)) 154 EX_ST_FP(STORE(std, %f2, %o0))
149 bne,pt %icc, 1b 155 bne,pt %icc, 1b
150 add %o0, 0x8, %o0 156 add %o0, 0x8, %o0
151 157
@@ -155,25 +161,25 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
155 LOAD(prefetch, %o1 + 0x080, #one_read) 161 LOAD(prefetch, %o1 + 0x080, #one_read)
156 LOAD(prefetch, %o1 + 0x0c0, #one_read) 162 LOAD(prefetch, %o1 + 0x0c0, #one_read)
157 LOAD(prefetch, %o1 + 0x100, #one_read) 163 LOAD(prefetch, %o1 + 0x100, #one_read)
158 EX_LD(LOAD(ldd, %o1 + 0x000, %f0)) 164 EX_LD_FP(LOAD(ldd, %o1 + 0x000, %f0))
159 LOAD(prefetch, %o1 + 0x140, #one_read) 165 LOAD(prefetch, %o1 + 0x140, #one_read)
160 EX_LD(LOAD(ldd, %o1 + 0x008, %f2)) 166 EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2))
161 LOAD(prefetch, %o1 + 0x180, #one_read) 167 LOAD(prefetch, %o1 + 0x180, #one_read)
162 EX_LD(LOAD(ldd, %o1 + 0x010, %f4)) 168 EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4))
163 LOAD(prefetch, %o1 + 0x1c0, #one_read) 169 LOAD(prefetch, %o1 + 0x1c0, #one_read)
164 faligndata %f0, %f2, %f16 170 faligndata %f0, %f2, %f16
165 EX_LD(LOAD(ldd, %o1 + 0x018, %f6)) 171 EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6))
166 faligndata %f2, %f4, %f18 172 faligndata %f2, %f4, %f18
167 EX_LD(LOAD(ldd, %o1 + 0x020, %f8)) 173 EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8))
168 faligndata %f4, %f6, %f20 174 faligndata %f4, %f6, %f20
169 EX_LD(LOAD(ldd, %o1 + 0x028, %f10)) 175 EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10))
170 faligndata %f6, %f8, %f22 176 faligndata %f6, %f8, %f22
171 177
172 EX_LD(LOAD(ldd, %o1 + 0x030, %f12)) 178 EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12))
173 faligndata %f8, %f10, %f24 179 faligndata %f8, %f10, %f24
174 EX_LD(LOAD(ldd, %o1 + 0x038, %f14)) 180 EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14))
175 faligndata %f10, %f12, %f26 181 faligndata %f10, %f12, %f26
176 EX_LD(LOAD(ldd, %o1 + 0x040, %f0)) 182 EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0))
177 183
178 subcc GLOBAL_SPARE, 0x80, GLOBAL_SPARE 184 subcc GLOBAL_SPARE, 0x80, GLOBAL_SPARE
179 add %o1, 0x40, %o1 185 add %o1, 0x40, %o1
@@ -184,26 +190,26 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
184 190
185 .align 64 191 .align 64
1861: 1921:
187 EX_LD(LOAD(ldd, %o1 + 0x008, %f2)) 193 EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2))
188 faligndata %f12, %f14, %f28 194 faligndata %f12, %f14, %f28
189 EX_LD(LOAD(ldd, %o1 + 0x010, %f4)) 195 EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4))
190 faligndata %f14, %f0, %f30 196 faligndata %f14, %f0, %f30
191 EX_ST(STORE_BLK(%f16, %o0)) 197 EX_ST_FP(STORE_BLK(%f16, %o0))
192 EX_LD(LOAD(ldd, %o1 + 0x018, %f6)) 198 EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6))
193 faligndata %f0, %f2, %f16 199 faligndata %f0, %f2, %f16
194 add %o0, 0x40, %o0 200 add %o0, 0x40, %o0
195 201
196 EX_LD(LOAD(ldd, %o1 + 0x020, %f8)) 202 EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8))
197 faligndata %f2, %f4, %f18 203 faligndata %f2, %f4, %f18
198 EX_LD(LOAD(ldd, %o1 + 0x028, %f10)) 204 EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10))
199 faligndata %f4, %f6, %f20 205 faligndata %f4, %f6, %f20
200 EX_LD(LOAD(ldd, %o1 + 0x030, %f12)) 206 EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12))
201 subcc %o3, 0x01, %o3 207 subcc %o3, 0x01, %o3
202 faligndata %f6, %f8, %f22 208 faligndata %f6, %f8, %f22
203 EX_LD(LOAD(ldd, %o1 + 0x038, %f14)) 209 EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14))
204 210
205 faligndata %f8, %f10, %f24 211 faligndata %f8, %f10, %f24
206 EX_LD(LOAD(ldd, %o1 + 0x040, %f0)) 212 EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0))
207 LOAD(prefetch, %o1 + 0x1c0, #one_read) 213 LOAD(prefetch, %o1 + 0x1c0, #one_read)
208 faligndata %f10, %f12, %f26 214 faligndata %f10, %f12, %f26
209 bg,pt %XCC, 1b 215 bg,pt %XCC, 1b
@@ -211,29 +217,29 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
211 217
212 /* Finally we copy the last full 64-byte block. */ 218 /* Finally we copy the last full 64-byte block. */
2132: 2192:
214 EX_LD(LOAD(ldd, %o1 + 0x008, %f2)) 220 EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2))
215 faligndata %f12, %f14, %f28 221 faligndata %f12, %f14, %f28
216 EX_LD(LOAD(ldd, %o1 + 0x010, %f4)) 222 EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4))
217 faligndata %f14, %f0, %f30 223 faligndata %f14, %f0, %f30
218 EX_ST(STORE_BLK(%f16, %o0)) 224 EX_ST_FP(STORE_BLK(%f16, %o0))
219 EX_LD(LOAD(ldd, %o1 + 0x018, %f6)) 225 EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6))
220 faligndata %f0, %f2, %f16 226 faligndata %f0, %f2, %f16
221 EX_LD(LOAD(ldd, %o1 + 0x020, %f8)) 227 EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8))
222 faligndata %f2, %f4, %f18 228 faligndata %f2, %f4, %f18
223 EX_LD(LOAD(ldd, %o1 + 0x028, %f10)) 229 EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10))
224 faligndata %f4, %f6, %f20 230 faligndata %f4, %f6, %f20
225 EX_LD(LOAD(ldd, %o1 + 0x030, %f12)) 231 EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12))
226 faligndata %f6, %f8, %f22 232 faligndata %f6, %f8, %f22
227 EX_LD(LOAD(ldd, %o1 + 0x038, %f14)) 233 EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14))
228 faligndata %f8, %f10, %f24 234 faligndata %f8, %f10, %f24
229 cmp %g1, 0 235 cmp %g1, 0
230 be,pt %XCC, 1f 236 be,pt %XCC, 1f
231 add %o0, 0x40, %o0 237 add %o0, 0x40, %o0
232 EX_LD(LOAD(ldd, %o1 + 0x040, %f0)) 238 EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0))
2331: faligndata %f10, %f12, %f26 2391: faligndata %f10, %f12, %f26
234 faligndata %f12, %f14, %f28 240 faligndata %f12, %f14, %f28
235 faligndata %f14, %f0, %f30 241 faligndata %f14, %f0, %f30
236 EX_ST(STORE_BLK(%f16, %o0)) 242 EX_ST_FP(STORE_BLK(%f16, %o0))
237 add %o0, 0x40, %o0 243 add %o0, 0x40, %o0
238 add %o1, 0x40, %o1 244 add %o1, 0x40, %o1
239 membar #Sync 245 membar #Sync
@@ -253,20 +259,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
253 259
254 sub %o2, %g2, %o2 260 sub %o2, %g2, %o2
255 be,a,pt %XCC, 1f 261 be,a,pt %XCC, 1f
256 EX_LD(LOAD(ldd, %o1 + 0x00, %f0)) 262 EX_LD_FP(LOAD(ldd, %o1 + 0x00, %f0))
257 263
2581: EX_LD(LOAD(ldd, %o1 + 0x08, %f2)) 2641: EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f2))
259 add %o1, 0x8, %o1 265 add %o1, 0x8, %o1
260 subcc %g2, 0x8, %g2 266 subcc %g2, 0x8, %g2
261 faligndata %f0, %f2, %f8 267 faligndata %f0, %f2, %f8
262 EX_ST(STORE(std, %f8, %o0)) 268 EX_ST_FP(STORE(std, %f8, %o0))
263 be,pn %XCC, 2f 269 be,pn %XCC, 2f
264 add %o0, 0x8, %o0 270 add %o0, 0x8, %o0
265 EX_LD(LOAD(ldd, %o1 + 0x08, %f0)) 271 EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f0))
266 add %o1, 0x8, %o1 272 add %o1, 0x8, %o1
267 subcc %g2, 0x8, %g2 273 subcc %g2, 0x8, %g2
268 faligndata %f2, %f0, %f8 274 faligndata %f2, %f0, %f8
269 EX_ST(STORE(std, %f8, %o0)) 275 EX_ST_FP(STORE(std, %f8, %o0))
270 bne,pn %XCC, 1b 276 bne,pn %XCC, 1b
271 add %o0, 0x8, %o0 277 add %o0, 0x8, %o0
272 278
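The sparc hunks above wrap the FP-based copy loops in new EX_LD_FP/EX_ST_FP macros so that faults in those loops resolve through an fp-specific fixup (__retl_one_fp) rather than the plain integer one. As a rough, hypothetical C analogue of the exception-table idea only (names, addresses, and the return convention below are illustrative assumptions, not the kernel's implementation):

```c
/* Hypothetical sketch of an exception-table lookup: each access that may
 * fault registers a fixup; the FP variants get a fixup that can also put
 * the FPU state back in order before reporting the partial copy. */
#include <stdio.h>
#include <stddef.h>

typedef long (*fixup_fn)(size_t remaining);

struct ex_entry {
    unsigned long insn;   /* address of the access that may fault */
    fixup_fn      fixup;  /* handler to run if it does */
};

static long retl(size_t remaining)    { return (long)remaining; }
static long retl_fp(size_t remaining)
{
    /* an FP-aware fixup would also restore/disable FP state here */
    return (long)remaining;
}

static const struct ex_entry table[] = {
    { 0x1000, retl },     /* integer load in the byte loop */
    { 0x1040, retl_fp },  /* FP block load in the main loop */
};

static long handle_fault(unsigned long insn, size_t remaining)
{
    for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
        if (table[i].insn == insn)
            return table[i].fixup(remaining);
    return -1; /* no fixup entry: would be a kernel oops */
}

int main(void)
{
    printf("%ld\n", handle_fault(0x1040, 32)); /* FP fixup reports 32 bytes left */
    return 0;
}
```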
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 06332cb7e7d1..3f5c48ddba45 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -38,6 +38,14 @@ static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
38 return best && (best->ecx & bit(X86_FEATURE_XSAVE)); 38 return best && (best->ecx & bit(X86_FEATURE_XSAVE));
39} 39}
40 40
41static inline bool guest_cpuid_has_mtrr(struct kvm_vcpu *vcpu)
42{
43 struct kvm_cpuid_entry2 *best;
44
45 best = kvm_find_cpuid_entry(vcpu, 1, 0);
46 return best && (best->edx & bit(X86_FEATURE_MTRR));
47}
48
41static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu) 49static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu)
42{ 50{
43 struct kvm_cpuid_entry2 *best; 51 struct kvm_cpuid_entry2 *best;
diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index 9e8bf13572e6..3f8c732117ec 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -120,14 +120,22 @@ static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
120 return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK; 120 return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
121} 121}
122 122
123static u8 mtrr_disabled_type(void) 123static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu)
124{ 124{
125 /* 125 /*
126 * Intel SDM 11.11.2.2: all MTRRs are disabled when 126 * Intel SDM 11.11.2.2: all MTRRs are disabled when
127 * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC 127 * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC
128 * memory type is applied to all of physical memory. 128 * memory type is applied to all of physical memory.
129 *
130 * However, virtual machines can be run with CPUID such that
131 * there are no MTRRs. In that case, the firmware will never
 132 * enable MTRRs, and it would be undesirable to run the
 133 * guest entirely with UC memory, so we use WB instead.
129 */ 134 */
130 return MTRR_TYPE_UNCACHABLE; 135 if (guest_cpuid_has_mtrr(vcpu))
136 return MTRR_TYPE_UNCACHABLE;
137 else
138 return MTRR_TYPE_WRBACK;
131} 139}
132 140
133/* 141/*
@@ -267,7 +275,7 @@ static int fixed_mtrr_addr_to_seg(u64 addr)
267 275
268 for (seg = 0; seg < seg_num; seg++) { 276 for (seg = 0; seg < seg_num; seg++) {
269 mtrr_seg = &fixed_seg_table[seg]; 277 mtrr_seg = &fixed_seg_table[seg];
270 if (mtrr_seg->start >= addr && addr < mtrr_seg->end) 278 if (mtrr_seg->start <= addr && addr < mtrr_seg->end)
271 return seg; 279 return seg;
272 } 280 }
273 281
@@ -300,7 +308,6 @@ static void var_mtrr_range(struct kvm_mtrr_range *range, u64 *start, u64 *end)
300 *start = range->base & PAGE_MASK; 308 *start = range->base & PAGE_MASK;
301 309
302 mask = range->mask & PAGE_MASK; 310 mask = range->mask & PAGE_MASK;
303 mask |= ~0ULL << boot_cpu_data.x86_phys_bits;
304 311
305 /* This cannot overflow because writing to the reserved bits of 312 /* This cannot overflow because writing to the reserved bits of
306 * variable MTRRs causes a #GP. 313 * variable MTRRs causes a #GP.
@@ -356,10 +363,14 @@ static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
356 if (var_mtrr_range_is_valid(cur)) 363 if (var_mtrr_range_is_valid(cur))
357 list_del(&mtrr_state->var_ranges[index].node); 364 list_del(&mtrr_state->var_ranges[index].node);
358 365
366 /* Extend the mask with all 1 bits to the left, since those
367 * bits must implicitly be 0. The bits are then cleared
368 * when reading them.
369 */
359 if (!is_mtrr_mask) 370 if (!is_mtrr_mask)
360 cur->base = data; 371 cur->base = data;
361 else 372 else
362 cur->mask = data; 373 cur->mask = data | (-1LL << cpuid_maxphyaddr(vcpu));
363 374
364 /* add it to the list if it's enabled. */ 375 /* add it to the list if it's enabled. */
365 if (var_mtrr_range_is_valid(cur)) { 376 if (var_mtrr_range_is_valid(cur)) {
@@ -426,6 +437,8 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
426 *pdata = vcpu->arch.mtrr_state.var_ranges[index].base; 437 *pdata = vcpu->arch.mtrr_state.var_ranges[index].base;
427 else 438 else
428 *pdata = vcpu->arch.mtrr_state.var_ranges[index].mask; 439 *pdata = vcpu->arch.mtrr_state.var_ranges[index].mask;
440
441 *pdata &= (1ULL << cpuid_maxphyaddr(vcpu)) - 1;
429 } 442 }
430 443
431 return 0; 444 return 0;
@@ -670,7 +683,7 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
670 } 683 }
671 684
672 if (iter.mtrr_disabled) 685 if (iter.mtrr_disabled)
673 return mtrr_disabled_type(); 686 return mtrr_disabled_type(vcpu);
674 687
675 /* not contained in any MTRRs. */ 688 /* not contained in any MTRRs. */
676 if (type == -1) 689 if (type == -1)
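The mtrr.c change above stores the variable-MTRR mask MSR with the bits at and above the guest's MAXPHYADDR forced to 1, and strips them again when the guest reads the MSR back. A small standalone worked example of that round trip (the 40-bit MAXPHYADDR and the mask value are arbitrary illustration values):

```c
/* Round-trip of the mask handling introduced above: writes extend the mask
 * with 1s above MAXPHYADDR, reads clear them again. Standalone demo only. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const unsigned maxphyaddr = 40;               /* example guest MAXPHYADDR */
    uint64_t guest_write = 0x0000000ff0000800ULL; /* mask MSR as the guest wrote it */

    /* set_var_mtrr_msr(): bits >= MAXPHYADDR are implicitly 1 */
    uint64_t stored = guest_write | (~0ULL << maxphyaddr);

    /* kvm_mtrr_get_msr(): hide those bits again on read */
    uint64_t read_back = stored & ((1ULL << maxphyaddr) - 1);

    printf("stored    = %#018llx\n", (unsigned long long)stored);
    printf("read back = %#018llx\n", (unsigned long long)read_back);
    return read_back == guest_write ? 0 : 1;
}
```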
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 83a1c643f9a5..899c40f826dd 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3422,6 +3422,8 @@ static int handle_exit(struct kvm_vcpu *vcpu)
3422 struct kvm_run *kvm_run = vcpu->run; 3422 struct kvm_run *kvm_run = vcpu->run;
3423 u32 exit_code = svm->vmcb->control.exit_code; 3423 u32 exit_code = svm->vmcb->control.exit_code;
3424 3424
3425 trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
3426
3425 if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE)) 3427 if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
3426 vcpu->arch.cr0 = svm->vmcb->save.cr0; 3428 vcpu->arch.cr0 = svm->vmcb->save.cr0;
3427 if (npt_enabled) 3429 if (npt_enabled)
@@ -3892,8 +3894,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
3892 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; 3894 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
3893 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; 3895 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
3894 3896
3895 trace_kvm_exit(svm->vmcb->control.exit_code, vcpu, KVM_ISA_SVM);
3896
3897 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) 3897 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
3898 kvm_before_handle_nmi(&svm->vcpu); 3898 kvm_before_handle_nmi(&svm->vcpu);
3899 3899
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index af823a388c19..44976a596fa6 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2803,7 +2803,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2803 msr_info->data = vcpu->arch.ia32_xss; 2803 msr_info->data = vcpu->arch.ia32_xss;
2804 break; 2804 break;
2805 case MSR_TSC_AUX: 2805 case MSR_TSC_AUX:
2806 if (!guest_cpuid_has_rdtscp(vcpu)) 2806 if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated)
2807 return 1; 2807 return 1;
2808 /* Otherwise falls through */ 2808 /* Otherwise falls through */
2809 default: 2809 default:
@@ -2909,7 +2909,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2909 clear_atomic_switch_msr(vmx, MSR_IA32_XSS); 2909 clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
2910 break; 2910 break;
2911 case MSR_TSC_AUX: 2911 case MSR_TSC_AUX:
2912 if (!guest_cpuid_has_rdtscp(vcpu)) 2912 if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated)
2913 return 1; 2913 return 1;
2914 /* Check reserved bit, higher 32 bits should be zero */ 2914 /* Check reserved bit, higher 32 bits should be zero */
2915 if ((data >> 32) != 0) 2915 if ((data >> 32) != 0)
@@ -8042,6 +8042,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
8042 u32 exit_reason = vmx->exit_reason; 8042 u32 exit_reason = vmx->exit_reason;
8043 u32 vectoring_info = vmx->idt_vectoring_info; 8043 u32 vectoring_info = vmx->idt_vectoring_info;
8044 8044
8045 trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);
8046
8045 /* 8047 /*
8046 * Flush logged GPAs PML buffer, this will make dirty_bitmap more 8048 * Flush logged GPAs PML buffer, this will make dirty_bitmap more
8047 * updated. Another good is, in kvm_vm_ioctl_get_dirty_log, before 8049 * updated. Another good is, in kvm_vm_ioctl_get_dirty_log, before
@@ -8668,7 +8670,6 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
8668 vmx->loaded_vmcs->launched = 1; 8670 vmx->loaded_vmcs->launched = 1;
8669 8671
8670 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON); 8672 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
8671 trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX);
8672 8673
8673 /* 8674 /*
8674 * the KVM_REQ_EVENT optimization bit is only on for one entry, and if 8675 * the KVM_REQ_EVENT optimization bit is only on for one entry, and if
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index eed32283d22c..7ffc224bbe41 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3572,9 +3572,11 @@ static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
3572 3572
3573static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps) 3573static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
3574{ 3574{
3575 int i;
3575 mutex_lock(&kvm->arch.vpit->pit_state.lock); 3576 mutex_lock(&kvm->arch.vpit->pit_state.lock);
3576 memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state)); 3577 memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
3577 kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0); 3578 for (i = 0; i < 3; i++)
3579 kvm_pit_load_count(kvm, i, ps->channels[i].count, 0);
3578 mutex_unlock(&kvm->arch.vpit->pit_state.lock); 3580 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3579 return 0; 3581 return 0;
3580} 3582}
@@ -3593,6 +3595,7 @@ static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
3593static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) 3595static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
3594{ 3596{
3595 int start = 0; 3597 int start = 0;
3598 int i;
3596 u32 prev_legacy, cur_legacy; 3599 u32 prev_legacy, cur_legacy;
3597 mutex_lock(&kvm->arch.vpit->pit_state.lock); 3600 mutex_lock(&kvm->arch.vpit->pit_state.lock);
3598 prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY; 3601 prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
@@ -3602,7 +3605,8 @@ static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
3602 memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels, 3605 memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
3603 sizeof(kvm->arch.vpit->pit_state.channels)); 3606 sizeof(kvm->arch.vpit->pit_state.channels));
3604 kvm->arch.vpit->pit_state.flags = ps->flags; 3607 kvm->arch.vpit->pit_state.flags = ps->flags;
3605 kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start); 3608 for (i = 0; i < 3; i++)
3609 kvm_pit_load_count(kvm, i, kvm->arch.vpit->pit_state.channels[i].count, start);
3606 mutex_unlock(&kvm->arch.vpit->pit_state.lock); 3610 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3607 return 0; 3611 return 0;
3608} 3612}
@@ -6515,6 +6519,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
6515 if (req_immediate_exit) 6519 if (req_immediate_exit)
6516 smp_send_reschedule(vcpu->cpu); 6520 smp_send_reschedule(vcpu->cpu);
6517 6521
6522 trace_kvm_entry(vcpu->vcpu_id);
6523 wait_lapic_expire(vcpu);
6518 __kvm_guest_enter(); 6524 __kvm_guest_enter();
6519 6525
6520 if (unlikely(vcpu->arch.switch_db_regs)) { 6526 if (unlikely(vcpu->arch.switch_db_regs)) {
@@ -6527,8 +6533,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
6527 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; 6533 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
6528 } 6534 }
6529 6535
6530 trace_kvm_entry(vcpu->vcpu_id);
6531 wait_lapic_expire(vcpu);
6532 kvm_x86_ops->run(vcpu); 6536 kvm_x86_ops->run(vcpu);
6533 6537
6534 /* 6538 /*
diff --git a/arch/x86/um/signal.c b/arch/x86/um/signal.c
index e5f854ce2d72..14fcd01ed992 100644
--- a/arch/x86/um/signal.c
+++ b/arch/x86/um/signal.c
@@ -470,7 +470,7 @@ long sys_sigreturn(void)
470 struct sigcontext __user *sc = &frame->sc; 470 struct sigcontext __user *sc = &frame->sc;
471 int sig_size = (_NSIG_WORDS - 1) * sizeof(unsigned long); 471 int sig_size = (_NSIG_WORDS - 1) * sizeof(unsigned long);
472 472
473 if (copy_from_user(&set.sig[0], (void *)sc->oldmask, sizeof(set.sig[0])) || 473 if (copy_from_user(&set.sig[0], &sc->oldmask, sizeof(set.sig[0])) ||
474 copy_from_user(&set.sig[1], frame->extramask, sig_size)) 474 copy_from_user(&set.sig[1], frame->extramask, sig_size))
475 goto segfault; 475 goto segfault;
476 476
diff --git a/block/blk-core.c b/block/blk-core.c
index 3636be469fa2..c487b94c59e3 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1689,8 +1689,6 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
1689 struct request *req; 1689 struct request *req;
1690 unsigned int request_count = 0; 1690 unsigned int request_count = 0;
1691 1691
1692 blk_queue_split(q, &bio, q->bio_split);
1693
1694 /* 1692 /*
1695 * low level driver can indicate that it wants pages above a 1693 * low level driver can indicate that it wants pages above a
1696 * certain limit bounced to low memory (ie for highmem, or even 1694 * certain limit bounced to low memory (ie for highmem, or even
@@ -1698,6 +1696,8 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
1698 */ 1696 */
1699 blk_queue_bounce(q, &bio); 1697 blk_queue_bounce(q, &bio);
1700 1698
1699 blk_queue_split(q, &bio, q->bio_split);
1700
1701 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { 1701 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1702 bio->bi_error = -EIO; 1702 bio->bi_error = -EIO;
1703 bio_endio(bio); 1703 bio_endio(bio);
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index f4e02ae93f58..11154a330f07 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -200,7 +200,8 @@ static int acpi_pss_perf_init(struct acpi_processor *pr,
200 goto err_remove_sysfs_thermal; 200 goto err_remove_sysfs_thermal;
201 } 201 }
202 202
203 sysfs_remove_link(&pr->cdev->device.kobj, "device"); 203 return 0;
204
204 err_remove_sysfs_thermal: 205 err_remove_sysfs_thermal:
205 sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); 206 sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
206 err_thermal_unregister: 207 err_thermal_unregister:
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 8162475d96b5..a428e4ef71fd 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -219,6 +219,9 @@ static void end_cmd(struct nullb_cmd *cmd)
219{ 219{
220 struct request_queue *q = NULL; 220 struct request_queue *q = NULL;
221 221
222 if (cmd->rq)
223 q = cmd->rq->q;
224
222 switch (queue_mode) { 225 switch (queue_mode) {
223 case NULL_Q_MQ: 226 case NULL_Q_MQ:
224 blk_mq_end_request(cmd->rq, 0); 227 blk_mq_end_request(cmd->rq, 0);
@@ -232,9 +235,6 @@ static void end_cmd(struct nullb_cmd *cmd)
232 goto free_cmd; 235 goto free_cmd;
233 } 236 }
234 237
235 if (cmd->rq)
236 q = cmd->rq->q;
237
238 /* Restart queue if needed, as we are freeing a tag */ 238 /* Restart queue if needed, as we are freeing a tag */
239 if (q && !q->mq_ops && blk_queue_stopped(q)) { 239 if (q && !q->mq_ops && blk_queue_stopped(q)) {
240 unsigned long flags; 240 unsigned long flags;
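The null_blk hunk moves the `q = cmd->rq->q` read above the code that ends (and may free) the request, so the queue pointer is captured while `cmd->rq` is still valid. A generic, self-contained sketch of that "save what you need before handing the object back" pattern (the types and names below are made up for illustration, not the block layer API):

```c
/* Capture fields you still need *before* calling the routine that may free
 * the object they live in. Illustrative stand-ins only. */
#include <stdio.h>
#include <stdlib.h>

struct queue   { int stopped; };
struct request { struct queue *q; };

static void end_request(struct request *rq)
{
    free(rq);                 /* after this, rq->q must not be touched */
}

static void complete(struct request *rq)
{
    struct queue *q = rq ? rq->q : NULL;  /* capture first, as in the fix above */

    end_request(rq);

    if (q && q->stopped)
        printf("restarting stopped queue\n");
}

int main(void)
{
    struct queue q = { .stopped = 1 };
    struct request *rq = malloc(sizeof(*rq));
    rq->q = &q;
    complete(rq);
    return 0;
}
```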
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index 846bc29c157d..25996e256110 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -342,13 +342,13 @@ static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
342 342
343 ret = _sunxi_rsb_run_xfer(rsb); 343 ret = _sunxi_rsb_run_xfer(rsb);
344 if (ret) 344 if (ret)
345 goto out; 345 goto unlock;
346 346
347 *buf = readl(rsb->regs + RSB_DATA); 347 *buf = readl(rsb->regs + RSB_DATA);
348 348
349unlock:
349 mutex_unlock(&rsb->lock); 350 mutex_unlock(&rsb->lock);
350 351
351out:
352 return ret; 352 return ret;
353} 353}
354 354
@@ -527,9 +527,9 @@ static int sunxi_rsb_init_device_mode(struct sunxi_rsb *rsb)
527 */ 527 */
528 528
529static const struct sunxi_rsb_addr_map sunxi_rsb_addr_maps[] = { 529static const struct sunxi_rsb_addr_map sunxi_rsb_addr_maps[] = {
530 { 0x3e3, 0x2d }, /* Primary PMIC: AXP223, AXP809, AXP81X, ... */ 530 { 0x3a3, 0x2d }, /* Primary PMIC: AXP223, AXP809, AXP81X, ... */
531 { 0x745, 0x3a }, /* Secondary PMIC: AXP806, ... */ 531 { 0x745, 0x3a }, /* Secondary PMIC: AXP806, ... */
532 { 0xe89, 0x45 }, /* Peripheral IC: AC100, ... */ 532 { 0xe89, 0x4e }, /* Peripheral IC: AC100, ... */
533}; 533};
534 534
535static u8 sunxi_rsb_get_rtaddr(u16 hwaddr) 535static u8 sunxi_rsb_get_rtaddr(u16 hwaddr)
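The first sunxi-rsb hunk retargets the error `goto` so the failure path still drops `rsb->lock` before returning; previously it jumped past the unlock and leaked the mutex. A minimal standalone sketch of that label layout, with a pthread mutex standing in for the kernel mutex:

```c
/* Error paths taken after a lock is acquired must jump to a label that still
 * runs the unlock. pthread mutex used as a stand-in for illustration. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int do_transfer(int fail) { return fail ? -EIO : 0; }

static int read_reg(int fail, unsigned *out)
{
    int ret;

    pthread_mutex_lock(&lock);

    ret = do_transfer(fail);
    if (ret)
        goto unlock;          /* was "goto out", which skipped the unlock */

    *out = 0xab;              /* pretend register read */

unlock:
    pthread_mutex_unlock(&lock);
    return ret;
}

int main(void)
{
    unsigned v = 0;
    printf("ok=%d err=%d v=%#x\n", read_reg(0, &v), read_reg(1, &v), v);
    return 0;
}
```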
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index 2c3b16fd3a01..de5e89b2eaaa 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -31,7 +31,7 @@ static struct scpi_ops *scpi_ops;
31 31
32static struct scpi_dvfs_info *scpi_get_dvfs_info(struct device *cpu_dev) 32static struct scpi_dvfs_info *scpi_get_dvfs_info(struct device *cpu_dev)
33{ 33{
34 u8 domain = topology_physical_package_id(cpu_dev->id); 34 int domain = topology_physical_package_id(cpu_dev->id);
35 35
36 if (domain < 0) 36 if (domain < 0)
37 return ERR_PTR(-EINVAL); 37 return ERR_PTR(-EINVAL);
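The scpi-cpufreq fix widens `domain` from `u8` to `int`: topology_physical_package_id() can return -1, and storing that in a `u8` makes the following `domain < 0` check unreachable. A tiny standalone demonstration of the truncation:

```c
/* Why "u8 domain" broke the error check: -1 stored in an unsigned 8-bit
 * variable becomes 255, so "domain < 0" can never be true. */
#include <stdio.h>
#include <stdint.h>

static int lookup_package_id(void) { return -1; }  /* stand-in for the error case */

int main(void)
{
    uint8_t as_u8  = lookup_package_id();
    int     as_int = lookup_package_id();

    printf("u8:  value=%u  (value < 0) = %d\n", as_u8,  as_u8  < 0);  /* 255, 0 */
    printf("int: value=%d  (value < 0) = %d\n", as_int, as_int < 0);  /* -1, 1 */
    return 0;
}
```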
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 5a5f04d0902d..048cfe073dae 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1264,7 +1264,8 @@ struct amdgpu_cs_parser {
1264 struct ww_acquire_ctx ticket; 1264 struct ww_acquire_ctx ticket;
1265 1265
1266 /* user fence */ 1266 /* user fence */
1267 struct amdgpu_user_fence uf; 1267 struct amdgpu_user_fence uf;
1268 struct amdgpu_bo_list_entry uf_entry;
1268}; 1269};
1269 1270
1270struct amdgpu_job { 1271struct amdgpu_job {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 4f352ec9dec4..25a3e2485cc2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -127,6 +127,37 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
127 return 0; 127 return 0;
128} 128}
129 129
130static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
131 struct drm_amdgpu_cs_chunk_fence *fence_data)
132{
133 struct drm_gem_object *gobj;
134 uint32_t handle;
135
136 handle = fence_data->handle;
137 gobj = drm_gem_object_lookup(p->adev->ddev, p->filp,
138 fence_data->handle);
139 if (gobj == NULL)
140 return -EINVAL;
141
142 p->uf.bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
143 p->uf.offset = fence_data->offset;
144
145 if (amdgpu_ttm_tt_has_userptr(p->uf.bo->tbo.ttm)) {
146 drm_gem_object_unreference_unlocked(gobj);
147 return -EINVAL;
148 }
149
150 p->uf_entry.robj = amdgpu_bo_ref(p->uf.bo);
151 p->uf_entry.prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
152 p->uf_entry.allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
153 p->uf_entry.priority = 0;
154 p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
155 p->uf_entry.tv.shared = true;
156
157 drm_gem_object_unreference_unlocked(gobj);
158 return 0;
159}
160
130int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) 161int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
131{ 162{
132 union drm_amdgpu_cs *cs = data; 163 union drm_amdgpu_cs *cs = data;
@@ -207,28 +238,15 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
207 238
208 case AMDGPU_CHUNK_ID_FENCE: 239 case AMDGPU_CHUNK_ID_FENCE:
209 size = sizeof(struct drm_amdgpu_cs_chunk_fence); 240 size = sizeof(struct drm_amdgpu_cs_chunk_fence);
210 if (p->chunks[i].length_dw * sizeof(uint32_t) >= size) { 241 if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
211 uint32_t handle;
212 struct drm_gem_object *gobj;
213 struct drm_amdgpu_cs_chunk_fence *fence_data;
214
215 fence_data = (void *)p->chunks[i].kdata;
216 handle = fence_data->handle;
217 gobj = drm_gem_object_lookup(p->adev->ddev,
218 p->filp, handle);
219 if (gobj == NULL) {
220 ret = -EINVAL;
221 goto free_partial_kdata;
222 }
223
224 p->uf.bo = gem_to_amdgpu_bo(gobj);
225 amdgpu_bo_ref(p->uf.bo);
226 drm_gem_object_unreference_unlocked(gobj);
227 p->uf.offset = fence_data->offset;
228 } else {
229 ret = -EINVAL; 242 ret = -EINVAL;
230 goto free_partial_kdata; 243 goto free_partial_kdata;
231 } 244 }
245
246 ret = amdgpu_cs_user_fence_chunk(p, (void *)p->chunks[i].kdata);
247 if (ret)
248 goto free_partial_kdata;
249
232 break; 250 break;
233 251
234 case AMDGPU_CHUNK_ID_DEPENDENCIES: 252 case AMDGPU_CHUNK_ID_DEPENDENCIES:
@@ -391,6 +409,9 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
391 p->vm_bos = amdgpu_vm_get_bos(p->adev, &fpriv->vm, 409 p->vm_bos = amdgpu_vm_get_bos(p->adev, &fpriv->vm,
392 &p->validated); 410 &p->validated);
393 411
412 if (p->uf.bo)
413 list_add(&p->uf_entry.tv.head, &p->validated);
414
394 if (need_mmap_lock) 415 if (need_mmap_lock)
395 down_read(&current->mm->mmap_sem); 416 down_read(&current->mm->mmap_sem);
396 417
@@ -488,8 +509,8 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
488 for (i = 0; i < parser->num_ibs; i++) 509 for (i = 0; i < parser->num_ibs; i++)
489 amdgpu_ib_free(parser->adev, &parser->ibs[i]); 510 amdgpu_ib_free(parser->adev, &parser->ibs[i]);
490 kfree(parser->ibs); 511 kfree(parser->ibs);
491 if (parser->uf.bo) 512 amdgpu_bo_unref(&parser->uf.bo);
492 amdgpu_bo_unref(&parser->uf.bo); 513 amdgpu_bo_unref(&parser->uf_entry.robj);
493} 514}
494 515
495static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, 516static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index b3ba27fd9a6b..e69357172ffb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -55,6 +55,9 @@ static int exynos_crtc_atomic_check(struct drm_crtc *crtc,
55{ 55{
56 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); 56 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
57 57
58 if (!state->enable)
59 return 0;
60
58 if (exynos_crtc->ops->atomic_check) 61 if (exynos_crtc->ops->atomic_check)
59 return exynos_crtc->ops->atomic_check(exynos_crtc, state); 62 return exynos_crtc->ops->atomic_check(exynos_crtc, state);
60 63
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a01e51581c4c..f4af19a0d569 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2193,8 +2193,17 @@ struct drm_i915_gem_request {
2193 struct drm_i915_private *i915; 2193 struct drm_i915_private *i915;
2194 struct intel_engine_cs *ring; 2194 struct intel_engine_cs *ring;
2195 2195
2196 /** GEM sequence number associated with this request. */ 2196 /** GEM sequence number associated with the previous request,
2197 uint32_t seqno; 2197 * when the HWS breadcrumb is equal to this the GPU is processing
2198 * this request.
2199 */
2200 u32 previous_seqno;
2201
2202 /** GEM sequence number associated with this request,
2203 * when the HWS breadcrumb is equal or greater than this the GPU
2204 * has finished processing this request.
2205 */
2206 u32 seqno;
2198 2207
2199 /** Position in the ringbuffer of the start of the request */ 2208 /** Position in the ringbuffer of the start of the request */
2200 u32 head; 2209 u32 head;
@@ -2839,6 +2848,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
2839 2848
2840int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, 2849int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
2841 u32 flags); 2850 u32 flags);
2851void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
2842int __must_check i915_vma_unbind(struct i915_vma *vma); 2852int __must_check i915_vma_unbind(struct i915_vma *vma);
2843/* 2853/*
2844 * BEWARE: Do not use the function below unless you can _absolutely_ 2854 * BEWARE: Do not use the function below unless you can _absolutely_
@@ -2910,15 +2920,17 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
2910 return (int32_t)(seq1 - seq2) >= 0; 2920 return (int32_t)(seq1 - seq2) >= 0;
2911} 2921}
2912 2922
2923static inline bool i915_gem_request_started(struct drm_i915_gem_request *req,
2924 bool lazy_coherency)
2925{
2926 u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);
2927 return i915_seqno_passed(seqno, req->previous_seqno);
2928}
2929
2913static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req, 2930static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
2914 bool lazy_coherency) 2931 bool lazy_coherency)
2915{ 2932{
2916 u32 seqno; 2933 u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);
2917
2918 BUG_ON(req == NULL);
2919
2920 seqno = req->ring->get_seqno(req->ring, lazy_coherency);
2921
2922 return i915_seqno_passed(seqno, req->seqno); 2934 return i915_seqno_passed(seqno, req->seqno);
2923} 2935}
2924 2936
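The new i915_gem_request_started() reuses the same wrap-safe comparison as i915_gem_request_completed(): i915_seqno_passed() treats the 32-bit sequence space as a circle by checking the sign of the difference. A standalone worked example of that comparison across wraparound:

```c
/* Wrap-safe sequence comparison as used by i915_seqno_passed(): seq1 has
 * passed seq2 iff (int32_t)(seq1 - seq2) >= 0, even across 2^32 wraparound. */
#include <stdio.h>
#include <stdint.h>

static int seqno_passed(uint32_t seq1, uint32_t seq2)
{
    return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
    printf("%d\n", seqno_passed(101, 100));        /* 1: ordinary case */
    printf("%d\n", seqno_passed(5, 0xfffffff0u));  /* 1: breadcrumb wrapped past 0 */
    printf("%d\n", seqno_passed(0xfffffff0u, 5));  /* 0: not yet reached */
    return 0;
}
```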
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 32e6aade6223..f56af0aaafde 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1146,23 +1146,74 @@ static bool missed_irq(struct drm_i915_private *dev_priv,
1146 return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings); 1146 return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
1147} 1147}
1148 1148
1149static int __i915_spin_request(struct drm_i915_gem_request *req) 1149static unsigned long local_clock_us(unsigned *cpu)
1150{
1151 unsigned long t;
1152
1153 /* Cheaply and approximately convert from nanoseconds to microseconds.
1154 * The result and subsequent calculations are also defined in the same
1155 * approximate microseconds units. The principal source of timing
1156 * error here is from the simple truncation.
1157 *
1158 * Note that local_clock() is only defined wrt the current CPU;
1159 * the comparisons are no longer valid if we switch CPUs. Instead of
1160 * blocking preemption for the entire busywait, we can detect the CPU
1161 * switch and use that as indicator of system load and a reason to
1162 * stop busywaiting, see busywait_stop().
1163 */
1164 *cpu = get_cpu();
1165 t = local_clock() >> 10;
1166 put_cpu();
1167
1168 return t;
1169}
1170
1171static bool busywait_stop(unsigned long timeout, unsigned cpu)
1172{
1173 unsigned this_cpu;
1174
1175 if (time_after(local_clock_us(&this_cpu), timeout))
1176 return true;
1177
1178 return this_cpu != cpu;
1179}
1180
1181static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
1150{ 1182{
1151 unsigned long timeout; 1183 unsigned long timeout;
1184 unsigned cpu;
1185
1186 /* When waiting for high frequency requests, e.g. during synchronous
1187 * rendering split between the CPU and GPU, the finite amount of time
1188 * required to set up the irq and wait upon it limits the response
1189 * rate. By busywaiting on the request completion for a short while we
1190 * can service the high frequency waits as quickly as possible. However,
1191 * if it is a slow request, we want to sleep as quickly as possible.
1192 * The tradeoff between waiting and sleeping is roughly the time it
1193 * takes to sleep on a request, on the order of a microsecond.
1194 */
1152 1195
1153 if (i915_gem_request_get_ring(req)->irq_refcount) 1196 if (req->ring->irq_refcount)
1154 return -EBUSY; 1197 return -EBUSY;
1155 1198
1156 timeout = jiffies + 1; 1199 /* Only spin if we know the GPU is processing this request */
1200 if (!i915_gem_request_started(req, true))
1201 return -EAGAIN;
1202
1203 timeout = local_clock_us(&cpu) + 5;
1157 while (!need_resched()) { 1204 while (!need_resched()) {
1158 if (i915_gem_request_completed(req, true)) 1205 if (i915_gem_request_completed(req, true))
1159 return 0; 1206 return 0;
1160 1207
1161 if (time_after_eq(jiffies, timeout)) 1208 if (signal_pending_state(state, current))
1209 break;
1210
1211 if (busywait_stop(timeout, cpu))
1162 break; 1212 break;
1163 1213
1164 cpu_relax_lowlatency(); 1214 cpu_relax_lowlatency();
1165 } 1215 }
1216
1166 if (i915_gem_request_completed(req, false)) 1217 if (i915_gem_request_completed(req, false))
1167 return 0; 1218 return 0;
1168 1219
@@ -1197,6 +1248,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
1197 struct drm_i915_private *dev_priv = dev->dev_private; 1248 struct drm_i915_private *dev_priv = dev->dev_private;
1198 const bool irq_test_in_progress = 1249 const bool irq_test_in_progress =
1199 ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring); 1250 ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
1251 int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
1200 DEFINE_WAIT(wait); 1252 DEFINE_WAIT(wait);
1201 unsigned long timeout_expire; 1253 unsigned long timeout_expire;
1202 s64 before, now; 1254 s64 before, now;
@@ -1229,7 +1281,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
1229 before = ktime_get_raw_ns(); 1281 before = ktime_get_raw_ns();
1230 1282
1231 /* Optimistic spin for the next jiffie before touching IRQs */ 1283 /* Optimistic spin for the next jiffie before touching IRQs */
1232 ret = __i915_spin_request(req); 1284 ret = __i915_spin_request(req, state);
1233 if (ret == 0) 1285 if (ret == 0)
1234 goto out; 1286 goto out;
1235 1287
@@ -1241,8 +1293,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
1241 for (;;) { 1293 for (;;) {
1242 struct timer_list timer; 1294 struct timer_list timer;
1243 1295
1244 prepare_to_wait(&ring->irq_queue, &wait, 1296 prepare_to_wait(&ring->irq_queue, &wait, state);
1245 interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
1246 1297
1247 /* We need to check whether any gpu reset happened in between 1298 /* We need to check whether any gpu reset happened in between
1248 * the caller grabbing the seqno and now ... */ 1299 * the caller grabbing the seqno and now ... */
@@ -1260,7 +1311,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
1260 break; 1311 break;
1261 } 1312 }
1262 1313
1263 if (interruptible && signal_pending(current)) { 1314 if (signal_pending_state(state, current)) {
1264 ret = -ERESTARTSYS; 1315 ret = -ERESTARTSYS;
1265 break; 1316 break;
1266 } 1317 }
@@ -2554,6 +2605,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
2554 request->batch_obj = obj; 2605 request->batch_obj = obj;
2555 2606
2556 request->emitted_jiffies = jiffies; 2607 request->emitted_jiffies = jiffies;
2608 request->previous_seqno = ring->last_submitted_seqno;
2557 ring->last_submitted_seqno = request->seqno; 2609 ring->last_submitted_seqno = request->seqno;
2558 list_add_tail(&request->list, &ring->request_list); 2610 list_add_tail(&request->list, &ring->request_list);
2559 2611
@@ -4080,6 +4132,29 @@ i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
4080 return false; 4132 return false;
4081} 4133}
4082 4134
4135void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
4136{
4137 struct drm_i915_gem_object *obj = vma->obj;
4138 bool mappable, fenceable;
4139 u32 fence_size, fence_alignment;
4140
4141 fence_size = i915_gem_get_gtt_size(obj->base.dev,
4142 obj->base.size,
4143 obj->tiling_mode);
4144 fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
4145 obj->base.size,
4146 obj->tiling_mode,
4147 true);
4148
4149 fenceable = (vma->node.size == fence_size &&
4150 (vma->node.start & (fence_alignment - 1)) == 0);
4151
4152 mappable = (vma->node.start + fence_size <=
4153 to_i915(obj->base.dev)->gtt.mappable_end);
4154
4155 obj->map_and_fenceable = mappable && fenceable;
4156}
4157
4083static int 4158static int
4084i915_gem_object_do_pin(struct drm_i915_gem_object *obj, 4159i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
4085 struct i915_address_space *vm, 4160 struct i915_address_space *vm,
@@ -4147,25 +4222,7 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
4147 4222
4148 if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL && 4223 if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL &&
4149 (bound ^ vma->bound) & GLOBAL_BIND) { 4224 (bound ^ vma->bound) & GLOBAL_BIND) {
4150 bool mappable, fenceable; 4225 __i915_vma_set_map_and_fenceable(vma);
4151 u32 fence_size, fence_alignment;
4152
4153 fence_size = i915_gem_get_gtt_size(obj->base.dev,
4154 obj->base.size,
4155 obj->tiling_mode);
4156 fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
4157 obj->base.size,
4158 obj->tiling_mode,
4159 true);
4160
4161 fenceable = (vma->node.size == fence_size &&
4162 (vma->node.start & (fence_alignment - 1)) == 0);
4163
4164 mappable = (vma->node.start + fence_size <=
4165 dev_priv->gtt.mappable_end);
4166
4167 obj->map_and_fenceable = mappable && fenceable;
4168
4169 WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable); 4226 WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
4170 } 4227 }
4171 4228
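The rewritten __i915_spin_request() above budgets the busywait in approximate microseconds derived from local_clock() and also gives up if the task migrates to another CPU, since the clock comparison is only meaningful on one CPU. A rough userspace sketch of the same loop shape (CLOCK_MONOTONIC and sched_getcpu() stand in for local_clock()/get_cpu(); the 5 µs budget mirrors the patch):

```c
/* Busywait with a small time budget plus a "stop if we changed CPU" check,
 * mirroring busywait_stop() above. Userspace stand-ins for kernel clocks. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static unsigned long clock_us(int *cpu)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    *cpu = sched_getcpu();
    return (unsigned long)ts.tv_sec * 1000000ul + ts.tv_nsec / 1000;
}

static bool busywait_stop(unsigned long timeout, int cpu)
{
    int this_cpu;

    if (clock_us(&this_cpu) > timeout)
        return true;           /* budget exhausted */
    return this_cpu != cpu;    /* migrated: the comparison is no longer valid */
}

static bool work_done(void) { return false; }  /* stand-in for request_completed */

int main(void)
{
    int cpu;
    unsigned long timeout = clock_us(&cpu) + 5;  /* ~5 us budget, as in the patch */
    unsigned long spins = 0;

    while (!work_done()) {
        if (busywait_stop(timeout, cpu))
            break;
        spins++;
    }
    printf("gave up after %lu spins\n", spins);
    return 0;
}
```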
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 43f35d12b677..86c7500454b4 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2676,6 +2676,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
2676 return ret; 2676 return ret;
2677 } 2677 }
2678 vma->bound |= GLOBAL_BIND; 2678 vma->bound |= GLOBAL_BIND;
2679 __i915_vma_set_map_and_fenceable(vma);
2679 list_add_tail(&vma->mm_list, &ggtt_vm->inactive_list); 2680 list_add_tail(&vma->mm_list, &ggtt_vm->inactive_list);
2680 } 2681 }
2681 2682
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index cdacf3f5b77a..87e919a06b27 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -687,6 +687,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
687 } 687 }
688 688
689 vma->bound |= GLOBAL_BIND; 689 vma->bound |= GLOBAL_BIND;
690 __i915_vma_set_map_and_fenceable(vma);
690 list_add_tail(&vma->mm_list, &ggtt->inactive_list); 691 list_add_tail(&vma->mm_list, &ggtt->inactive_list);
691 } 692 }
692 693
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 62211abe4922..beb0374a19f1 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -116,6 +116,7 @@ static void skylake_pfit_enable(struct intel_crtc *crtc);
116static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force); 116static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
117static void ironlake_pfit_enable(struct intel_crtc *crtc); 117static void ironlake_pfit_enable(struct intel_crtc *crtc);
118static void intel_modeset_setup_hw_state(struct drm_device *dev); 118static void intel_modeset_setup_hw_state(struct drm_device *dev);
119static void intel_pre_disable_primary(struct drm_crtc *crtc);
119 120
120typedef struct { 121typedef struct {
121 int min, max; 122 int min, max;
@@ -2607,6 +2608,8 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2607 struct drm_i915_gem_object *obj; 2608 struct drm_i915_gem_object *obj;
2608 struct drm_plane *primary = intel_crtc->base.primary; 2609 struct drm_plane *primary = intel_crtc->base.primary;
2609 struct drm_plane_state *plane_state = primary->state; 2610 struct drm_plane_state *plane_state = primary->state;
2611 struct drm_crtc_state *crtc_state = intel_crtc->base.state;
2612 struct intel_plane *intel_plane = to_intel_plane(primary);
2610 struct drm_framebuffer *fb; 2613 struct drm_framebuffer *fb;
2611 2614
2612 if (!plane_config->fb) 2615 if (!plane_config->fb)
@@ -2643,6 +2646,18 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2643 } 2646 }
2644 } 2647 }
2645 2648
2649 /*
2650 * We've failed to reconstruct the BIOS FB. Current display state
2651 * indicates that the primary plane is visible, but has a NULL FB,
2652 * which will lead to problems later if we don't fix it up. The
2653 * simplest solution is to just disable the primary plane now and
2654 * pretend the BIOS never had it enabled.
2655 */
2656 to_intel_plane_state(plane_state)->visible = false;
2657 crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
2658 intel_pre_disable_primary(&intel_crtc->base);
2659 intel_plane->disable_plane(primary, &intel_crtc->base);
2660
2646 return; 2661 return;
2647 2662
2648valid_fb: 2663valid_fb:
@@ -9910,14 +9925,14 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
9910 return true; 9925 return true;
9911} 9926}
9912 9927
9913static void i845_update_cursor(struct drm_crtc *crtc, u32 base) 9928static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
9914{ 9929{
9915 struct drm_device *dev = crtc->dev; 9930 struct drm_device *dev = crtc->dev;
9916 struct drm_i915_private *dev_priv = dev->dev_private; 9931 struct drm_i915_private *dev_priv = dev->dev_private;
9917 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9932 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9918 uint32_t cntl = 0, size = 0; 9933 uint32_t cntl = 0, size = 0;
9919 9934
9920 if (base) { 9935 if (on) {
9921 unsigned int width = intel_crtc->base.cursor->state->crtc_w; 9936 unsigned int width = intel_crtc->base.cursor->state->crtc_w;
9922 unsigned int height = intel_crtc->base.cursor->state->crtc_h; 9937 unsigned int height = intel_crtc->base.cursor->state->crtc_h;
9923 unsigned int stride = roundup_pow_of_two(width) * 4; 9938 unsigned int stride = roundup_pow_of_two(width) * 4;
@@ -9972,16 +9987,15 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
9972 } 9987 }
9973} 9988}
9974 9989
9975static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) 9990static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
9976{ 9991{
9977 struct drm_device *dev = crtc->dev; 9992 struct drm_device *dev = crtc->dev;
9978 struct drm_i915_private *dev_priv = dev->dev_private; 9993 struct drm_i915_private *dev_priv = dev->dev_private;
9979 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9994 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9980 int pipe = intel_crtc->pipe; 9995 int pipe = intel_crtc->pipe;
9981 uint32_t cntl; 9996 uint32_t cntl = 0;
9982 9997
9983 cntl = 0; 9998 if (on) {
9984 if (base) {
9985 cntl = MCURSOR_GAMMA_ENABLE; 9999 cntl = MCURSOR_GAMMA_ENABLE;
9986 switch (intel_crtc->base.cursor->state->crtc_w) { 10000 switch (intel_crtc->base.cursor->state->crtc_w) {
9987 case 64: 10001 case 64:
@@ -10032,18 +10046,17 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
10032 int y = cursor_state->crtc_y; 10046 int y = cursor_state->crtc_y;
10033 u32 base = 0, pos = 0; 10047 u32 base = 0, pos = 0;
10034 10048
10035 if (on) 10049 base = intel_crtc->cursor_addr;
10036 base = intel_crtc->cursor_addr;
10037 10050
10038 if (x >= intel_crtc->config->pipe_src_w) 10051 if (x >= intel_crtc->config->pipe_src_w)
10039 base = 0; 10052 on = false;
10040 10053
10041 if (y >= intel_crtc->config->pipe_src_h) 10054 if (y >= intel_crtc->config->pipe_src_h)
10042 base = 0; 10055 on = false;
10043 10056
10044 if (x < 0) { 10057 if (x < 0) {
10045 if (x + cursor_state->crtc_w <= 0) 10058 if (x + cursor_state->crtc_w <= 0)
10046 base = 0; 10059 on = false;
10047 10060
10048 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; 10061 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
10049 x = -x; 10062 x = -x;
@@ -10052,16 +10065,13 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
10052 10065
10053 if (y < 0) { 10066 if (y < 0) {
10054 if (y + cursor_state->crtc_h <= 0) 10067 if (y + cursor_state->crtc_h <= 0)
10055 base = 0; 10068 on = false;
10056 10069
10057 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; 10070 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
10058 y = -y; 10071 y = -y;
10059 } 10072 }
10060 pos |= y << CURSOR_Y_SHIFT; 10073 pos |= y << CURSOR_Y_SHIFT;
10061 10074
10062 if (base == 0 && intel_crtc->cursor_base == 0)
10063 return;
10064
10065 I915_WRITE(CURPOS(pipe), pos); 10075 I915_WRITE(CURPOS(pipe), pos);
10066 10076
10067 /* ILK+ do this automagically */ 10077 /* ILK+ do this automagically */
@@ -10072,9 +10082,9 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
10072 } 10082 }
10073 10083
10074 if (IS_845G(dev) || IS_I865G(dev)) 10084 if (IS_845G(dev) || IS_I865G(dev))
10075 i845_update_cursor(crtc, base); 10085 i845_update_cursor(crtc, base, on);
10076 else 10086 else
10077 i9xx_update_cursor(crtc, base); 10087 i9xx_update_cursor(crtc, base, on);
10078} 10088}
10079 10089
10080static bool cursor_size_ok(struct drm_device *dev, 10090static bool cursor_size_ok(struct drm_device *dev,
@@ -13718,6 +13728,7 @@ intel_check_cursor_plane(struct drm_plane *plane,
13718 struct drm_crtc *crtc = crtc_state->base.crtc; 13728 struct drm_crtc *crtc = crtc_state->base.crtc;
13719 struct drm_framebuffer *fb = state->base.fb; 13729 struct drm_framebuffer *fb = state->base.fb;
13720 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 13730 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13731 enum pipe pipe = to_intel_plane(plane)->pipe;
13721 unsigned stride; 13732 unsigned stride;
13722 int ret; 13733 int ret;
13723 13734
@@ -13751,6 +13762,22 @@ intel_check_cursor_plane(struct drm_plane *plane,
13751 return -EINVAL; 13762 return -EINVAL;
13752 } 13763 }
13753 13764
13765 /*
13766 * There's something wrong with the cursor on CHV pipe C.
13767 * If it straddles the left edge of the screen then
13768 * moving it away from the edge or disabling it often
13769 * results in a pipe underrun, and often that can lead to
13770 * a dead pipe (constant underrun reported, and it scans
13771 * out just a solid color). To recover from that, the
13772 * display power well must be turned off and on again.
13773 * Refuse to put the cursor into that compromised position.
13774 */
13775 if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
13776 state->visible && state->base.crtc_x < 0) {
13777 DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
13778 return -EINVAL;
13779 }
13780
13754 return 0; 13781 return 0;
13755} 13782}
13756 13783
@@ -13774,9 +13801,6 @@ intel_commit_cursor_plane(struct drm_plane *plane,
13774 crtc = crtc ? crtc : plane->crtc; 13801 crtc = crtc ? crtc : plane->crtc;
13775 intel_crtc = to_intel_crtc(crtc); 13802 intel_crtc = to_intel_crtc(crtc);
13776 13803
13777 if (intel_crtc->cursor_bo == obj)
13778 goto update;
13779
13780 if (!obj) 13804 if (!obj)
13781 addr = 0; 13805 addr = 0;
13782 else if (!INTEL_INFO(dev)->cursor_needs_physical) 13806 else if (!INTEL_INFO(dev)->cursor_needs_physical)
@@ -13785,9 +13809,7 @@ intel_commit_cursor_plane(struct drm_plane *plane,
13785 addr = obj->phys_handle->busaddr; 13809 addr = obj->phys_handle->busaddr;
13786 13810
13787 intel_crtc->cursor_addr = addr; 13811 intel_crtc->cursor_addr = addr;
13788 intel_crtc->cursor_bo = obj;
13789 13812
13790update:
13791 if (crtc->state->active) 13813 if (crtc->state->active)
13792 intel_crtc_update_cursor(crtc, state->visible); 13814 intel_crtc_update_cursor(crtc, state->visible);
13793} 13815}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index f2a1142bff34..0d00f07b7163 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -550,7 +550,6 @@ struct intel_crtc {
550 int adjusted_x; 550 int adjusted_x;
551 int adjusted_y; 551 int adjusted_y;
552 552
553 struct drm_i915_gem_object *cursor_bo;
554 uint32_t cursor_addr; 553 uint32_t cursor_addr;
555 uint32_t cursor_cntl; 554 uint32_t cursor_cntl;
556 uint32_t cursor_size; 555 uint32_t cursor_size;
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 81cdd9ff3892..64086f2d4e26 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1374,17 +1374,18 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
1374 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); 1374 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
1375 struct drm_i915_private *dev_priv = to_i915(connector->dev); 1375 struct drm_i915_private *dev_priv = to_i915(connector->dev);
1376 bool live_status = false; 1376 bool live_status = false;
1377 unsigned int retry = 3; 1377 unsigned int try;
1378 1378
1379 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 1379 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
1380 connector->base.id, connector->name); 1380 connector->base.id, connector->name);
1381 1381
1382 intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); 1382 intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
1383 1383
1384 while (!live_status && --retry) { 1384 for (try = 0; !live_status && try < 4; try++) {
1385 if (try)
1386 msleep(10);
1385 live_status = intel_digital_port_connected(dev_priv, 1387 live_status = intel_digital_port_connected(dev_priv,
1386 hdmi_to_dig_port(intel_hdmi)); 1388 hdmi_to_dig_port(intel_hdmi));
1387 mdelay(10);
1388 } 1389 }
1389 1390
1390 if (!live_status) 1391 if (!live_status)
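The intel_hdmi change restructures the live-status poll so the delay happens before each retry rather than after every attempt (including the last), and replaces the busy mdelay() with a sleeping msleep(). A small standalone sketch of that loop shape (usleep stands in for msleep; the four attempts and 10 ms spacing mirror the patch):

```c
/* Retry loop that only sleeps between attempts, never after the last one.
 * usleep() stands in for msleep(); probe() is a stand-in for the HPD check. */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool probe(int attempt) { return attempt == 2; }  /* succeeds on 3rd try */

int main(void)
{
    bool live = false;
    int try;

    for (try = 0; !live && try < 4; try++) {
        if (try)
            usleep(10 * 1000);   /* wait 10 ms before each retry */
        live = probe(try);
    }
    printf("live=%d after %d attempt(s)\n", live, try);
    return 0;
}
```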
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
index 43006db6fd58..80fed7e78dcb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
@@ -83,6 +83,7 @@ nvbios_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan)
83 fan->type = NVBIOS_THERM_FAN_UNK; 83 fan->type = NVBIOS_THERM_FAN_UNK;
84 } 84 }
85 85
86 fan->fan_mode = NVBIOS_THERM_FAN_LINEAR;
86 fan->min_duty = nvbios_rd08(bios, data + 0x02); 87 fan->min_duty = nvbios_rd08(bios, data + 0x02);
87 fan->max_duty = nvbios_rd08(bios, data + 0x03); 88 fan->max_duty = nvbios_rd08(bios, data + 0x03);
88 89
diff --git a/drivers/md/md.c b/drivers/md/md.c
index dbedc58d8c00..61aacab424cf 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -4326,8 +4326,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
4326 } 4326 }
4327 mddev_unlock(mddev); 4327 mddev_unlock(mddev);
4328 } 4328 }
4329 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 4329 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4330 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
4331 return -EBUSY; 4330 return -EBUSY;
4332 else if (cmd_match(page, "resync")) 4331 else if (cmd_match(page, "resync"))
4333 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4332 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
@@ -4340,8 +4339,12 @@ action_store(struct mddev *mddev, const char *page, size_t len)
4340 return -EINVAL; 4339 return -EINVAL;
4341 err = mddev_lock(mddev); 4340 err = mddev_lock(mddev);
4342 if (!err) { 4341 if (!err) {
4343 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4342 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4344 err = mddev->pers->start_reshape(mddev); 4343 err = -EBUSY;
4344 else {
4345 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4346 err = mddev->pers->start_reshape(mddev);
4347 }
4345 mddev_unlock(mddev); 4348 mddev_unlock(mddev);
4346 } 4349 }
4347 if (err) 4350 if (err)
diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c
index e87459f6d686..acd1460cf787 100644
--- a/drivers/memory/fsl_ifc.c
+++ b/drivers/memory/fsl_ifc.c
@@ -22,6 +22,7 @@
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/kernel.h> 23#include <linux/kernel.h>
24#include <linux/compiler.h> 24#include <linux/compiler.h>
25#include <linux/sched.h>
25#include <linux/spinlock.h> 26#include <linux/spinlock.h>
26#include <linux/types.h> 27#include <linux/types.h>
27#include <linux/slab.h> 28#include <linux/slab.h>
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index b077e43b5ba9..c4cb15a3098c 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -236,7 +236,7 @@ int ubi_debugfs_init(void)
236 236
237 dfs_rootdir = debugfs_create_dir("ubi", NULL); 237 dfs_rootdir = debugfs_create_dir("ubi", NULL);
238 if (IS_ERR_OR_NULL(dfs_rootdir)) { 238 if (IS_ERR_OR_NULL(dfs_rootdir)) {
239 int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir); 239 int err = dfs_rootdir ? PTR_ERR(dfs_rootdir) : -ENODEV;
240 240
241 pr_err("UBI error: cannot create \"ubi\" debugfs directory, error %d\n", 241 pr_err("UBI error: cannot create \"ubi\" debugfs directory, error %d\n",
242 err); 242 err);
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 1fc23e48fe8e..10cf3b549959 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -1299,7 +1299,7 @@ static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
1299 if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err)) 1299 if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
1300 goto exit; 1300 goto exit;
1301 1301
1302 crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC); 1302 crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
1303 hdr_crc = be32_to_cpu(vid_hdr->hdr_crc); 1303 hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
1304 if (hdr_crc != crc) { 1304 if (hdr_crc != crc) {
1305 ubi_err(ubi, "bad VID header CRC at PEB %d, calculated %#08x, read %#08x", 1305 ubi_err(ubi, "bad VID header CRC at PEB %d, calculated %#08x, read %#08x",
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index eb4489f9082f..56065632a5b8 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -603,6 +603,7 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
 	return 0;
 }
 
+static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk);
 /**
  * do_sync_erase - run the erase worker synchronously.
  * @ubi: UBI device description object
@@ -615,20 +616,16 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
 static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
 			 int vol_id, int lnum, int torture)
 {
-	struct ubi_work *wl_wrk;
+	struct ubi_work wl_wrk;
 
 	dbg_wl("sync erase of PEB %i", e->pnum);
 
-	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
-	if (!wl_wrk)
-		return -ENOMEM;
-
-	wl_wrk->e = e;
-	wl_wrk->vol_id = vol_id;
-	wl_wrk->lnum = lnum;
-	wl_wrk->torture = torture;
+	wl_wrk.e = e;
+	wl_wrk.vol_id = vol_id;
+	wl_wrk.lnum = lnum;
+	wl_wrk.torture = torture;
 
-	return erase_worker(ubi, wl_wrk, 0);
+	return __erase_worker(ubi, &wl_wrk);
 }
 
 /**
@@ -1014,7 +1011,7 @@ out_unlock:
 }
 
 /**
- * erase_worker - physical eraseblock erase worker function.
+ * __erase_worker - physical eraseblock erase worker function.
  * @ubi: UBI device description object
  * @wl_wrk: the work object
  * @shutdown: non-zero if the worker has to free memory and exit
@@ -1025,8 +1022,7 @@ out_unlock:
  * needed. Returns zero in case of success and a negative error code in case of
  * failure.
  */
-static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
-			int shutdown)
+static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
 {
 	struct ubi_wl_entry *e = wl_wrk->e;
 	int pnum = e->pnum;
@@ -1034,21 +1030,11 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
 	int lnum = wl_wrk->lnum;
 	int err, available_consumed = 0;
 
-	if (shutdown) {
-		dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
-		kfree(wl_wrk);
-		wl_entry_destroy(ubi, e);
-		return 0;
-	}
-
 	dbg_wl("erase PEB %d EC %d LEB %d:%d",
 	       pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
 
 	err = sync_erase(ubi, e, wl_wrk->torture);
 	if (!err) {
-		/* Fine, we've erased it successfully */
-		kfree(wl_wrk);
-
 		spin_lock(&ubi->wl_lock);
 		wl_tree_add(e, &ubi->free);
 		ubi->free_count++;
@@ -1066,7 +1052,6 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
 	}
 
 	ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);
-	kfree(wl_wrk);
 
 	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
 	    err == -EBUSY) {
@@ -1075,6 +1060,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
 		/* Re-schedule the LEB for erasure */
 		err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
 		if (err1) {
+			wl_entry_destroy(ubi, e);
 			err = err1;
 			goto out_ro;
 		}
@@ -1150,6 +1136,25 @@ out_ro:
 	return err;
 }
 
+static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
+			int shutdown)
+{
+	int ret;
+
+	if (shutdown) {
+		struct ubi_wl_entry *e = wl_wrk->e;
+
+		dbg_wl("cancel erasure of PEB %d EC %d", e->pnum, e->ec);
+		kfree(wl_wrk);
+		wl_entry_destroy(ubi, e);
+		return 0;
+	}
+
+	ret = __erase_worker(ubi, wl_wrk);
+	kfree(wl_wrk);
+	return ret;
+}
+
 /**
  * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
  * @ubi: UBI device description object
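[Editor's note: the wl.c change above splits the old erase_worker() into a core that never frees its argument and a thin queue-facing wrapper that owns the heap-allocated work item, so the synchronous path can pass a stack-allocated item and skip kmalloc/kfree. Below is a minimal, hedged userspace sketch of that ownership split; all names and the simplified structure are illustrative and not taken from the kernel.]

#include <stdio.h>
#include <stdlib.h>

struct work { int pnum; };

/* Core does the job but never frees its argument. */
static int do_work(struct work *w)
{
	printf("erasing PEB %d\n", w->pnum);
	return 0;
}

/* Queue-facing wrapper owns the heap allocation and always frees it. */
static int queued_work(struct work *w, int shutdown)
{
	int ret = shutdown ? 0 : do_work(w);

	free(w);
	return ret;
}

int main(void)
{
	struct work sync_w = { .pnum = 1 };	/* stack item: no malloc/free */
	struct work *async_w = malloc(sizeof(*async_w));

	do_work(&sync_w);			/* synchronous path */

	if (async_w) {
		async_w->pnum = 2;
		queued_work(async_w, 0);	/* wrapper frees async_w */
	}
	return 0;
}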
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 9e294ff4e652..0c67b57be83c 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2540,8 +2540,17 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 {
 	bool kill = nvme_io_incapable(ns->dev) && !blk_queue_dying(ns->queue);
 
-	if (kill)
+	if (kill) {
 		blk_set_queue_dying(ns->queue);
+
+		/*
+		 * The controller was shutdown first if we got here through
+		 * device removal. The shutdown may requeue outstanding
+		 * requests. These need to be aborted immediately so
+		 * del_gendisk doesn't block indefinitely for their completion.
+		 */
+		blk_mq_abort_requeue_list(ns->queue);
+	}
 	if (ns->disk->flags & GENHD_FL_UP)
 		del_gendisk(ns->disk);
 	if (kill || !blk_queue_dying(ns->queue)) {
@@ -2977,6 +2986,15 @@ static void nvme_dev_remove(struct nvme_dev *dev)
 {
 	struct nvme_ns *ns, *next;
 
+	if (nvme_io_incapable(dev)) {
+		/*
+		 * If the device is not capable of IO (surprise hot-removal,
+		 * for example), we need to quiesce prior to deleting the
+		 * namespaces. This will end outstanding requests and prevent
+		 * attempts to sync dirty data.
+		 */
+		nvme_dev_shutdown(dev);
+	}
 	list_for_each_entry_safe(ns, next, &dev->namespaces, list)
 		nvme_ns_remove(ns);
 }
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 61f768518a34..24ec282e15d8 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -599,8 +599,10 @@ static enum ap_wait ap_sm_read(struct ap_device *ap_dev)
 	status = ap_sm_recv(ap_dev);
 	switch (status.response_code) {
 	case AP_RESPONSE_NORMAL:
-		if (ap_dev->queue_count > 0)
+		if (ap_dev->queue_count > 0) {
+			ap_dev->state = AP_STATE_WORKING;
 			return AP_WAIT_AGAIN;
+		}
 		ap_dev->state = AP_STATE_IDLE;
 		return AP_WAIT_NONE;
 	case AP_RESPONSE_NO_PENDING_REPLY:
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index b2a1a81e6fc8..1b831598df7c 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -984,6 +984,36 @@ static struct virtqueue *virtio_ccw_vq_by_ind(struct virtio_ccw_device *vcdev,
 	return vq;
 }
 
+static void virtio_ccw_check_activity(struct virtio_ccw_device *vcdev,
+				      __u32 activity)
+{
+	if (vcdev->curr_io & activity) {
+		switch (activity) {
+		case VIRTIO_CCW_DOING_READ_FEAT:
+		case VIRTIO_CCW_DOING_WRITE_FEAT:
+		case VIRTIO_CCW_DOING_READ_CONFIG:
+		case VIRTIO_CCW_DOING_WRITE_CONFIG:
+		case VIRTIO_CCW_DOING_WRITE_STATUS:
+		case VIRTIO_CCW_DOING_SET_VQ:
+		case VIRTIO_CCW_DOING_SET_IND:
+		case VIRTIO_CCW_DOING_SET_CONF_IND:
+		case VIRTIO_CCW_DOING_RESET:
+		case VIRTIO_CCW_DOING_READ_VQ_CONF:
+		case VIRTIO_CCW_DOING_SET_IND_ADAPTER:
+		case VIRTIO_CCW_DOING_SET_VIRTIO_REV:
+			vcdev->curr_io &= ~activity;
+			wake_up(&vcdev->wait_q);
+			break;
+		default:
+			/* don't know what to do... */
+			dev_warn(&vcdev->cdev->dev,
+				 "Suspicious activity '%08x'\n", activity);
+			WARN_ON(1);
+			break;
+		}
+	}
+}
+
 static void virtio_ccw_int_handler(struct ccw_device *cdev,
 				   unsigned long intparm,
 				   struct irb *irb)
@@ -995,6 +1025,12 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
 
 	if (!vcdev)
 		return;
+	if (IS_ERR(irb)) {
+		vcdev->err = PTR_ERR(irb);
+		virtio_ccw_check_activity(vcdev, activity);
+		/* Don't poke around indicators, something's wrong. */
+		return;
+	}
 	/* Check if it's a notification from the host. */
 	if ((intparm == 0) &&
 	    (scsw_stctl(&irb->scsw) ==
@@ -1010,31 +1046,7 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
 		/* Map everything else to -EIO. */
 		vcdev->err = -EIO;
 	}
-	if (vcdev->curr_io & activity) {
-		switch (activity) {
-		case VIRTIO_CCW_DOING_READ_FEAT:
-		case VIRTIO_CCW_DOING_WRITE_FEAT:
-		case VIRTIO_CCW_DOING_READ_CONFIG:
-		case VIRTIO_CCW_DOING_WRITE_CONFIG:
-		case VIRTIO_CCW_DOING_WRITE_STATUS:
-		case VIRTIO_CCW_DOING_SET_VQ:
-		case VIRTIO_CCW_DOING_SET_IND:
-		case VIRTIO_CCW_DOING_SET_CONF_IND:
-		case VIRTIO_CCW_DOING_RESET:
-		case VIRTIO_CCW_DOING_READ_VQ_CONF:
-		case VIRTIO_CCW_DOING_SET_IND_ADAPTER:
-		case VIRTIO_CCW_DOING_SET_VIRTIO_REV:
-			vcdev->curr_io &= ~activity;
-			wake_up(&vcdev->wait_q);
-			break;
-		default:
-			/* don't know what to do... */
-			dev_warn(&cdev->dev, "Suspicious activity '%08x'\n",
-				 activity);
-			WARN_ON(1);
-			break;
-		}
-	}
+	virtio_ccw_check_activity(vcdev, activity);
 	for_each_set_bit(i, &vcdev->indicators,
 			 sizeof(vcdev->indicators) * BITS_PER_BYTE) {
 		/* The bit clear must happen before the vring kick. */
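[Editor's note: the virtio_ccw.c change factors the curr_io/wake-up bookkeeping into virtio_ccw_check_activity() so the new IS_ERR(irb) early-return path and the normal interrupt path share one implementation. A small standalone sketch of that "shared completion bookkeeping in one helper" shape follows; the bitmask and function names are invented for illustration.]

#include <stdio.h>

static unsigned int pending;	/* bitmask of operations in flight */

/* Shared bookkeeping: clear the finished operation and report it. */
static void check_activity(unsigned int op)
{
	if (pending & op) {
		pending &= ~op;
		printf("op %#x completed, pending now %#x\n", op, pending);
	}
}

static void irq_error_path(unsigned int op)
{
	/* Record the error, then reuse the same bookkeeping and bail out. */
	check_activity(op);
}

static void irq_normal_path(unsigned int op)
{
	check_activity(op);
}

int main(void)
{
	pending = 0x3;
	irq_error_path(0x1);
	irq_normal_path(0x2);
	return pending == 0 ? 0 : 1;
}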
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index 064031870ba0..ca0d3802f2af 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -148,8 +148,10 @@ static int receive_chars_read(struct uart_port *port)
 			uart_handle_dcd_change(port, 1);
 		}
 
-		for (i = 0; i < bytes_read; i++)
-			uart_handle_sysrq_char(port, con_read_page[i]);
+		if (port->sysrq != 0 && *con_read_page) {
+			for (i = 0; i < bytes_read; i++)
+				uart_handle_sysrq_char(port, con_read_page[i]);
+		}
 
 		if (port->state == NULL)
 			continue;
@@ -168,17 +170,17 @@ struct sunhv_ops {
 	int (*receive_chars)(struct uart_port *port);
 };
 
-static struct sunhv_ops bychar_ops = {
+static const struct sunhv_ops bychar_ops = {
 	.transmit_chars = transmit_chars_putchar,
 	.receive_chars = receive_chars_getchar,
 };
 
-static struct sunhv_ops bywrite_ops = {
+static const struct sunhv_ops bywrite_ops = {
 	.transmit_chars = transmit_chars_write,
 	.receive_chars = receive_chars_read,
 };
 
-static struct sunhv_ops *sunhv_ops = &bychar_ops;
+static const struct sunhv_ops *sunhv_ops = &bychar_ops;
 
 static struct tty_port *receive_chars(struct uart_port *port)
 {
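[Editor's note: the sunhv.c hunk above constifies the ops tables so the transmit/receive strategy is selected through a pointer to read-only data. A tiny standalone sketch of that const ops-table dispatch is shown below; the struct, function, and variable names are invented for illustration.]

#include <stdio.h>

struct io_ops {
	void (*transmit)(const char *s);
	int (*receive)(void);
};

static void tx_stdout(const char *s) { fputs(s, stdout); }
static int rx_none(void) { return -1; }

static const struct io_ops demo_ops = {
	.transmit = tx_stdout,
	.receive  = rx_none,
};

/* Selected implementation; only the pointer itself stays writable. */
static const struct io_ops *ops = &demo_ops;

int main(void)
{
	ops->transmit("hello via const ops table\n");
	return ops->receive() < 0 ? 0 : 1;
}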
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
index 9ffef06b30d5..c9d6c715c0fb 100644
--- a/fs/nfsd/nfs4layouts.c
+++ b/fs/nfsd/nfs4layouts.c
@@ -616,6 +616,7 @@ nfsd4_cb_layout_prepare(struct nfsd4_callback *cb)
 
 	mutex_lock(&ls->ls_mutex);
 	nfs4_inc_and_copy_stateid(&ls->ls_recall_sid, &ls->ls_stid);
+	mutex_unlock(&ls->ls_mutex);
 }
 
 static int
@@ -659,7 +660,6 @@ nfsd4_cb_layout_release(struct nfsd4_callback *cb)
 
 	trace_layout_recall_release(&ls->ls_stid.sc_stateid);
 
-	mutex_unlock(&ls->ls_mutex);
 	nfsd4_return_all_layouts(ls, &reaplist);
 	nfsd4_free_layouts(&reaplist);
 	nfs4_put_stid(&ls->ls_stid);
diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
index 698768bdc581..301d70b0174f 100644
--- a/scripts/recordmcount.c
+++ b/scripts/recordmcount.c
@@ -48,12 +48,17 @@
 
 static int fd_map;	/* File descriptor for file being modified. */
 static int mmap_failed; /* Boolean flag. */
-static void *ehdr_curr; /* current ElfXX_Ehdr * for resource cleanup */
 static char gpfx;	/* prefix for global symbol name (sometimes '_') */
 static struct stat sb;	/* Remember .st_size, etc. */
 static jmp_buf jmpenv;	/* setjmp/longjmp per-file error escape */
 static const char *altmcount;	/* alternate mcount symbol name */
 static int warn_on_notrace_sect; /* warn when section has mcount not being recorded */
+static void *file_map;	/* pointer of the mapped file */
+static void *file_end;	/* pointer to the end of the mapped file */
+static int file_updated; /* flag to state file was changed */
+static void *file_ptr;	/* current file pointer location */
+static void *file_append; /* added to the end of the file */
+static size_t file_append_size; /* how much is added to end of file */
 
 /* setjmp() return values */
 enum {
@@ -67,10 +72,14 @@ static void
 cleanup(void)
 {
 	if (!mmap_failed)
-		munmap(ehdr_curr, sb.st_size);
+		munmap(file_map, sb.st_size);
 	else
-		free(ehdr_curr);
-	close(fd_map);
+		free(file_map);
+	file_map = NULL;
+	free(file_append);
+	file_append = NULL;
+	file_append_size = 0;
+	file_updated = 0;
 }
 
 static void __attribute__((noreturn))
@@ -92,12 +101,22 @@ succeed_file(void)
 static off_t
 ulseek(int const fd, off_t const offset, int const whence)
 {
-	off_t const w = lseek(fd, offset, whence);
-	if (w == (off_t)-1) {
-		perror("lseek");
+	switch (whence) {
+	case SEEK_SET:
+		file_ptr = file_map + offset;
+		break;
+	case SEEK_CUR:
+		file_ptr += offset;
+		break;
+	case SEEK_END:
+		file_ptr = file_map + (sb.st_size - offset);
+		break;
+	}
+	if (file_ptr < file_map) {
+		fprintf(stderr, "lseek: seek before file\n");
 		fail_file();
 	}
-	return w;
+	return file_ptr - file_map;
 }
 
 static size_t
@@ -114,12 +133,38 @@ uread(int const fd, void *const buf, size_t const count)
 static size_t
 uwrite(int const fd, void const *const buf, size_t const count)
 {
-	size_t const n = write(fd, buf, count);
-	if (n != count) {
-		perror("write");
-		fail_file();
+	size_t cnt = count;
+	off_t idx = 0;
+
+	file_updated = 1;
+
+	if (file_ptr + count >= file_end) {
+		off_t aoffset = (file_ptr + count) - file_end;
+
+		if (aoffset > file_append_size) {
+			file_append = realloc(file_append, aoffset);
+			file_append_size = aoffset;
+		}
+		if (!file_append) {
+			perror("write");
+			fail_file();
+		}
+		if (file_ptr < file_end) {
+			cnt = file_end - file_ptr;
+		} else {
+			cnt = 0;
+			idx = aoffset - count;
+		}
 	}
-	return n;
+
+	if (cnt)
+		memcpy(file_ptr, buf, cnt);
+
+	if (cnt < count)
+		memcpy(file_append + idx, buf + cnt, count - cnt);
+
+	file_ptr += count;
+	return count;
 }
 
 static void *
@@ -192,9 +237,7 @@ static int make_nop_arm64(void *map, size_t const offset)
  */
 static void *mmap_file(char const *fname)
 {
-	void *addr;
-
-	fd_map = open(fname, O_RDWR);
+	fd_map = open(fname, O_RDONLY);
 	if (fd_map < 0 || fstat(fd_map, &sb) < 0) {
 		perror(fname);
 		fail_file();
@@ -203,15 +246,58 @@ static void *mmap_file(char const *fname)
 		fprintf(stderr, "not a regular file: %s\n", fname);
 		fail_file();
 	}
-	addr = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_PRIVATE,
+	file_map = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_PRIVATE,
 		    fd_map, 0);
 	mmap_failed = 0;
-	if (addr == MAP_FAILED) {
+	if (file_map == MAP_FAILED) {
 		mmap_failed = 1;
-		addr = umalloc(sb.st_size);
-		uread(fd_map, addr, sb.st_size);
+		file_map = umalloc(sb.st_size);
+		uread(fd_map, file_map, sb.st_size);
+	}
+	close(fd_map);
+
+	file_end = file_map + sb.st_size;
+
+	return file_map;
+}
+
+static void write_file(const char *fname)
+{
+	char tmp_file[strlen(fname) + 4];
+	size_t n;
+
+	if (!file_updated)
+		return;
+
+	sprintf(tmp_file, "%s.rc", fname);
+
+	/*
+	 * After reading the entire file into memory, delete it
+	 * and write it back, to prevent weird side effects of modifying
+	 * an object file in place.
+	 */
+	fd_map = open(tmp_file, O_WRONLY | O_TRUNC | O_CREAT, sb.st_mode);
+	if (fd_map < 0) {
+		perror(fname);
+		fail_file();
+	}
+	n = write(fd_map, file_map, sb.st_size);
+	if (n != sb.st_size) {
+		perror("write");
+		fail_file();
+	}
+	if (file_append_size) {
+		n = write(fd_map, file_append, file_append_size);
+		if (n != file_append_size) {
+			perror("write");
+			fail_file();
+		}
+	}
+	close(fd_map);
+	if (rename(tmp_file, fname) < 0) {
+		perror(fname);
+		fail_file();
 	}
-	return addr;
 }
 
 /* w8rev, w8nat, ...: Handle endianness. */
@@ -318,7 +404,6 @@ do_file(char const *const fname)
 	Elf32_Ehdr *const ehdr = mmap_file(fname);
 	unsigned int reltype = 0;
 
-	ehdr_curr = ehdr;
 	w = w4nat;
 	w2 = w2nat;
 	w8 = w8nat;
@@ -441,6 +526,7 @@ do_file(char const *const fname)
 	}
 	}  /* end switch */
 
+	write_file(fname);
 	cleanup();
 }
 
@@ -493,11 +579,14 @@ main(int argc, char *argv[])
 	case SJ_SETJMP:    /* normal sequence */
 		/* Avoid problems if early cleanup() */
 		fd_map = -1;
-		ehdr_curr = NULL;
 		mmap_failed = 1;
+		file_map = NULL;
+		file_ptr = NULL;
+		file_updated = 0;
 		do_file(file);
 		break;
 	case SJ_FAIL:    /* error in do_file or below */
+		sprintf("%s: failed\n", file);
 		++n_error;
 		break;
 	case SJ_SUCCEED:    /* premature success */
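[Editor's note: the recordmcount.c rework above stops seeking and writing into the object file directly; instead it edits an in-memory copy (plus an append buffer) and, in write_file(), writes the result to a temporary "<name>.rc" file before rename()-ing it over the original. Below is a rough, hedged standalone sketch of that write-back step only, assuming the buffer is already populated; file names and helper names here are made up for illustration.]

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Write buf to "<fname>.tmp" and then rename it over fname. */
static int write_back(const char *fname, const void *buf, size_t len)
{
	char tmp[4096];
	FILE *f;

	snprintf(tmp, sizeof(tmp), "%s.tmp", fname);
	f = fopen(tmp, "wb");
	if (!f)
		return -1;
	if (fwrite(buf, 1, len, f) != len) {
		fclose(f);
		remove(tmp);
		return -1;
	}
	if (fclose(f) != 0) {
		remove(tmp);
		return -1;
	}
	return rename(tmp, fname);
}

int main(void)
{
	const char msg[] = "patched object contents\n";

	return write_back("demo.o", msg, sizeof(msg) - 1) ? 1 : 0;
}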
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index bff5c8b329d1..3b3658297070 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -954,6 +954,36 @@ static int azx_resume(struct device *dev)
 }
 #endif /* CONFIG_PM_SLEEP || SUPPORT_VGA_SWITCHEROO */
 
+#ifdef CONFIG_PM_SLEEP
+/* put codec down to D3 at hibernation for Intel SKL+;
+ * otherwise BIOS may still access the codec and screw up the driver
+ */
+#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
+#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
+#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
+#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
+
+static int azx_freeze_noirq(struct device *dev)
+{
+	struct pci_dev *pci = to_pci_dev(dev);
+
+	if (IS_SKL_PLUS(pci))
+		pci_set_power_state(pci, PCI_D3hot);
+
+	return 0;
+}
+
+static int azx_thaw_noirq(struct device *dev)
+{
+	struct pci_dev *pci = to_pci_dev(dev);
+
+	if (IS_SKL_PLUS(pci))
+		pci_set_power_state(pci, PCI_D0);
+
+	return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
 #ifdef CONFIG_PM
 static int azx_runtime_suspend(struct device *dev)
 {
@@ -1063,6 +1093,10 @@ static int azx_runtime_idle(struct device *dev)
 
 static const struct dev_pm_ops azx_pm = {
 	SET_SYSTEM_SLEEP_PM_OPS(azx_suspend, azx_resume)
+#ifdef CONFIG_PM_SLEEP
+	.freeze_noirq = azx_freeze_noirq,
+	.thaw_noirq = azx_thaw_noirq,
+#endif
 	SET_RUNTIME_PM_OPS(azx_runtime_suspend, azx_runtime_resume, azx_runtime_idle)
 };
 
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 6c268dad143f..fe96428aa403 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1775,6 +1775,7 @@ enum {
 	ALC889_FIXUP_MBA11_VREF,
 	ALC889_FIXUP_MBA21_VREF,
 	ALC889_FIXUP_MP11_VREF,
+	ALC889_FIXUP_MP41_VREF,
 	ALC882_FIXUP_INV_DMIC,
 	ALC882_FIXUP_NO_PRIMARY_HP,
 	ALC887_FIXUP_ASUS_BASS,
@@ -1863,7 +1864,7 @@ static void alc889_fixup_mbp_vref(struct hda_codec *codec,
 			       const struct hda_fixup *fix, int action)
 {
 	struct alc_spec *spec = codec->spec;
-	static hda_nid_t nids[2] = { 0x14, 0x15 };
+	static hda_nid_t nids[3] = { 0x14, 0x15, 0x19 };
 	int i;
 
 	if (action != HDA_FIXUP_ACT_INIT)
@@ -2153,6 +2154,12 @@ static const struct hda_fixup alc882_fixups[] = {
 		.chained = true,
 		.chain_id = ALC885_FIXUP_MACPRO_GPIO,
 	},
+	[ALC889_FIXUP_MP41_VREF] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = alc889_fixup_mbp_vref,
+		.chained = true,
+		.chain_id = ALC885_FIXUP_MACPRO_GPIO,
+	},
 	[ALC882_FIXUP_INV_DMIC] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = alc_fixup_inv_dmic,
@@ -2235,7 +2242,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x106b, 0x3f00, "Macbook 5,1", ALC889_FIXUP_IMAC91_VREF),
 	SND_PCI_QUIRK(0x106b, 0x4000, "MacbookPro 5,1", ALC889_FIXUP_IMAC91_VREF),
 	SND_PCI_QUIRK(0x106b, 0x4100, "Macmini 3,1", ALC889_FIXUP_IMAC91_VREF),
-	SND_PCI_QUIRK(0x106b, 0x4200, "Mac Pro 5,1", ALC885_FIXUP_MACPRO_GPIO),
+	SND_PCI_QUIRK(0x106b, 0x4200, "Mac Pro 4,1/5,1", ALC889_FIXUP_MP41_VREF),
 	SND_PCI_QUIRK(0x106b, 0x4300, "iMac 9,1", ALC889_FIXUP_IMAC91_VREF),
 	SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF),
 	SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF),
diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c
index 84f5eb07a91b..afa6c5db9dcc 100644
--- a/sound/soc/codecs/es8328.c
+++ b/sound/soc/codecs/es8328.c
@@ -85,7 +85,15 @@ static const DECLARE_TLV_DB_SCALE(pga_tlv, 0, 300, 0);
 static const DECLARE_TLV_DB_SCALE(bypass_tlv, -1500, 300, 0);
 static const DECLARE_TLV_DB_SCALE(mic_tlv, 0, 300, 0);
 
-static const int deemph_settings[] = { 0, 32000, 44100, 48000 };
+static const struct {
+	int rate;
+	unsigned int val;
+} deemph_settings[] = {
+	{ 0, ES8328_DACCONTROL6_DEEMPH_OFF },
+	{ 32000, ES8328_DACCONTROL6_DEEMPH_32k },
+	{ 44100, ES8328_DACCONTROL6_DEEMPH_44_1k },
+	{ 48000, ES8328_DACCONTROL6_DEEMPH_48k },
+};
 
 static int es8328_set_deemph(struct snd_soc_codec *codec)
 {
@@ -97,21 +105,22 @@ static int es8328_set_deemph(struct snd_soc_codec *codec)
 	 * rate.
 	 */
 	if (es8328->deemph) {
-		best = 1;
-		for (i = 2; i < ARRAY_SIZE(deemph_settings); i++) {
-			if (abs(deemph_settings[i] - es8328->playback_fs) <
-			    abs(deemph_settings[best] - es8328->playback_fs))
+		best = 0;
+		for (i = 1; i < ARRAY_SIZE(deemph_settings); i++) {
+			if (abs(deemph_settings[i].rate - es8328->playback_fs) <
+			    abs(deemph_settings[best].rate - es8328->playback_fs))
 				best = i;
 		}
 
-		val = best << 1;
+		val = deemph_settings[best].val;
 	} else {
-		val = 0;
+		val = ES8328_DACCONTROL6_DEEMPH_OFF;
 	}
 
 	dev_dbg(codec->dev, "Set deemphasis %d\n", val);
 
-	return snd_soc_update_bits(codec, ES8328_DACCONTROL6, 0x6, val);
+	return snd_soc_update_bits(codec, ES8328_DACCONTROL6,
+			ES8328_DACCONTROL6_DEEMPH_MASK, val);
 }
 
 static int es8328_get_deemph(struct snd_kcontrol *kcontrol,
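[Editor's note: the es8328.c hunk replaces a bare rate array with a rate/register-value table and picks the entry whose rate is closest to the current playback rate. A compact, hedged sketch of that nearest-match table lookup follows; the table values and names are invented for illustration and are not the codec's real register settings.]

#include <stdio.h>
#include <stdlib.h>

struct rate_cfg { int rate; unsigned int val; };

static const struct rate_cfg table[] = {
	{ 32000, 0x1 }, { 44100, 0x2 }, { 48000, 0x3 },
};

/* Return the register value for the table rate nearest to fs. */
static unsigned int pick_nearest(int fs)
{
	size_t i, best = 0;

	for (i = 1; i < sizeof(table) / sizeof(table[0]); i++)
		if (abs(table[i].rate - fs) < abs(table[best].rate - fs))
			best = i;
	return table[best].val;
}

int main(void)
{
	printf("val for 44000 Hz: %#x\n", pick_nearest(44000));
	return 0;
}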
diff --git a/sound/soc/codecs/es8328.h b/sound/soc/codecs/es8328.h
index cb36afe10c0e..156c748c89c7 100644
--- a/sound/soc/codecs/es8328.h
+++ b/sound/soc/codecs/es8328.h
@@ -153,6 +153,7 @@ int es8328_probe(struct device *dev, struct regmap *regmap);
 #define ES8328_DACCONTROL6_CLICKFREE (1 << 3)
 #define ES8328_DACCONTROL6_DAC_INVR (1 << 4)
 #define ES8328_DACCONTROL6_DAC_INVL (1 << 5)
+#define ES8328_DACCONTROL6_DEEMPH_MASK (3 << 6)
 #define ES8328_DACCONTROL6_DEEMPH_OFF (0 << 6)
 #define ES8328_DACCONTROL6_DEEMPH_32k (1 << 6)
 #define ES8328_DACCONTROL6_DEEMPH_44_1k (2 << 6)
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index f540f82b1f27..08b40460663c 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -189,6 +189,7 @@ static int power_vag_event(struct snd_soc_dapm_widget *w,
 	case SND_SOC_DAPM_POST_PMU:
 		snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
 			SGTL5000_VAG_POWERUP, SGTL5000_VAG_POWERUP);
+		msleep(400);
 		break;
 
 	case SND_SOC_DAPM_PRE_PMD:
diff --git a/sound/soc/codecs/wm8974.c b/sound/soc/codecs/wm8974.c
index 0a60677397b3..4c29bd2ae75c 100644
--- a/sound/soc/codecs/wm8974.c
+++ b/sound/soc/codecs/wm8974.c
@@ -574,6 +574,7 @@ static const struct regmap_config wm8974_regmap = {
 	.max_register = WM8974_MONOMIX,
 	.reg_defaults = wm8974_reg_defaults,
 	.num_reg_defaults = ARRAY_SIZE(wm8974_reg_defaults),
+	.cache_type = REGCACHE_FLAT,
 };
 
 static int wm8974_probe(struct snd_soc_codec *codec)
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index c1c9c2e3525b..2ccb8bccc9d4 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -223,8 +223,8 @@ static void mcasp_start_tx(struct davinci_mcasp *mcasp)
 
 	/* wait for XDATA to be cleared */
 	cnt = 0;
-	while (!(mcasp_get_reg(mcasp, DAVINCI_MCASP_TXSTAT_REG) &
-		 ~XRDATA) && (cnt < 100000))
+	while ((mcasp_get_reg(mcasp, DAVINCI_MCASP_TXSTAT_REG) & XRDATA) &&
+	       (cnt < 100000))
 		cnt++;
 
 	/* Release TX state machine */
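[Editor's note: the davinci-mcasp fix corrects the XDATA poll so the loop really waits while the status bit is still set, with a bounded iteration count as a safety net. A small standalone sketch of that bounded-poll idiom follows; the "register" is a plain variable standing in for hardware and the bit value is invented for illustration.]

#include <stdio.h>

#define XRDATA 0x20u

/* Stand-in for the status register read. */
static unsigned int read_status(unsigned int *fake_reg)
{
	*fake_reg &= ~XRDATA;	/* pretend hardware clears the bit */
	return *fake_reg;
}

int main(void)
{
	unsigned int reg = XRDATA;
	unsigned int cnt = 0;

	/* Wait for XRDATA to clear, but give up after 100000 reads. */
	while ((read_status(&reg) & XRDATA) && cnt < 100000)
		cnt++;

	printf("polled %u times, XRDATA %s\n", cnt,
	       (reg & XRDATA) ? "still set" : "cleared");
	return 0;
}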
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
index ffd5f9acc849..08b460ba06ef 100644
--- a/sound/soc/fsl/fsl_sai.c
+++ b/sound/soc/fsl/fsl_sai.c
@@ -505,6 +505,24 @@ static int fsl_sai_trigger(struct snd_pcm_substream *substream, int cmd,
 					   FSL_SAI_CSR_FR, FSL_SAI_CSR_FR);
 			regmap_update_bits(sai->regmap, FSL_SAI_RCSR,
 					   FSL_SAI_CSR_FR, FSL_SAI_CSR_FR);
+
+			/*
+			 * For sai master mode, after several open/close sai,
+			 * there will be no frame clock, and can't recover
+			 * anymore. Add software reset to fix this issue.
+			 * This is a hardware bug, and will be fix in the
+			 * next sai version.
+			 */
+			if (!sai->is_slave_mode) {
+				/* Software Reset for both Tx and Rx */
+				regmap_write(sai->regmap,
+					     FSL_SAI_TCSR, FSL_SAI_CSR_SR);
+				regmap_write(sai->regmap,
+					     FSL_SAI_RCSR, FSL_SAI_CSR_SR);
+				/* Clear SR bit to finish the reset */
+				regmap_write(sai->regmap, FSL_SAI_TCSR, 0);
+				regmap_write(sai->regmap, FSL_SAI_RCSR, 0);
+			}
 		}
 		break;
 	default:
diff --git a/sound/soc/rockchip/rockchip_spdif.c b/sound/soc/rockchip/rockchip_spdif.c
index ac72ff5055bb..5a806da89f42 100644
--- a/sound/soc/rockchip/rockchip_spdif.c
+++ b/sound/soc/rockchip/rockchip_spdif.c
@@ -152,8 +152,10 @@ static int rk_spdif_trigger(struct snd_pcm_substream *substream,
 	case SNDRV_PCM_TRIGGER_RESUME:
 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
 		ret = regmap_update_bits(spdif->regmap, SPDIF_DMACR,
-				   SPDIF_DMACR_TDE_ENABLE,
-				   SPDIF_DMACR_TDE_ENABLE);
+				   SPDIF_DMACR_TDE_ENABLE |
+				   SPDIF_DMACR_TDL_MASK,
+				   SPDIF_DMACR_TDE_ENABLE |
+				   SPDIF_DMACR_TDL(16));
 
 		if (ret != 0)
 			return ret;
diff --git a/sound/soc/rockchip/rockchip_spdif.h b/sound/soc/rockchip/rockchip_spdif.h
index 921b4095fb92..3ef12770ae12 100644
--- a/sound/soc/rockchip/rockchip_spdif.h
+++ b/sound/soc/rockchip/rockchip_spdif.h
@@ -42,7 +42,7 @@
 
 #define SPDIF_DMACR_TDL_SHIFT	0
 #define SPDIF_DMACR_TDL(x)	((x) << SPDIF_DMACR_TDL_SHIFT)
-#define SPDIF_DMACR_TDL_MASK	(0x1f << SDPIF_DMACR_TDL_SHIFT)
+#define SPDIF_DMACR_TDL_MASK	(0x1f << SPDIF_DMACR_TDL_SHIFT)
 
 /*
  * XFER
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 65461f821a75..7a2f449bd85d 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1114,7 +1114,7 @@ bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map)
 		return true;
 	}
 
-	return dist_active_irq(vcpu);
+	return vgic_irq_is_active(vcpu, map->virt_irq);
 }
 
 /*