145 files changed, 1171 insertions, 697 deletions
diff --git a/Documentation/devicetree/bindings/arm/arch_timer.txt b/Documentation/devicetree/bindings/arm/arch_timer.txt
index 06fc7602593a..37b2cafa4e52 100644
--- a/Documentation/devicetree/bindings/arm/arch_timer.txt
+++ b/Documentation/devicetree/bindings/arm/arch_timer.txt
| @@ -19,6 +19,9 @@ to deliver its interrupts via SPIs. | |||
| 19 | 19 | ||
| 20 | - clock-frequency : The frequency of the main counter, in Hz. Optional. | 20 | - clock-frequency : The frequency of the main counter, in Hz. Optional. |
| 21 | 21 | ||
| 22 | - always-on : a boolean property. If present, the timer is powered through an | ||
| 23 | always-on power domain, therefore it never loses context. | ||
| 24 | |||
| 22 | Example: | 25 | Example: |
| 23 | 26 | ||
| 24 | timer { | 27 | timer { |
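The new always-on flag is presence-only, so a consumer just tests for it: a counter in an always-on power domain keeps running across deep idle states, so the C3STOP workaround can be skipped. A minimal sketch, with a hypothetical helper name that is not part of this patch:

    #include <linux/of.h>

    /* Hypothetical helper: decide whether the clockevent needs C3STOP. */
    static bool timer_needs_c3stop(struct device_node *np)
    {
            /* "always-on" present => the counter never loses context. */
            return !of_property_read_bool(np, "always-on");
    }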
diff --git a/Documentation/devicetree/bindings/net/socfpga-dwmac.txt b/Documentation/devicetree/bindings/net/socfpga-dwmac.txt
index 636f0ac4e223..2a60cd3e8d5d 100644
--- a/Documentation/devicetree/bindings/net/socfpga-dwmac.txt
+++ b/Documentation/devicetree/bindings/net/socfpga-dwmac.txt
| @@ -23,5 +23,5 @@ gmac0: ethernet@ff700000 { | |||
| 23 | interrupt-names = "macirq"; | 23 | interrupt-names = "macirq"; |
| 24 | mac-address = [00 00 00 00 00 00];/* Filled in by U-Boot */ | 24 | mac-address = [00 00 00 00 00 00];/* Filled in by U-Boot */ |
| 25 | clocks = <&emac_0_clk>; | 25 | clocks = <&emac_0_clk>; |
| 26 | clocks-names = "stmmaceth"; | 26 | clock-names = "stmmaceth"; |
| 27 | }; | 27 | }; |
diff --git a/Documentation/devicetree/bindings/net/stmmac.txt b/Documentation/devicetree/bindings/net/stmmac.txt
index 80c1fb8bfbb8..a2acd2b26baf 100644
--- a/Documentation/devicetree/bindings/net/stmmac.txt
+++ b/Documentation/devicetree/bindings/net/stmmac.txt
| @@ -33,7 +33,7 @@ Optional properties: | |||
| 33 | - max-frame-size: See ethernet.txt file in the same directory | 33 | - max-frame-size: See ethernet.txt file in the same directory |
| 34 | - clocks: If present, the first clock should be the GMAC main clock, | 34 | - clocks: If present, the first clock should be the GMAC main clock, |
| 35 | further clocks may be specified in derived bindings. | 35 | further clocks may be specified in derived bindings. |
| 36 | - clocks-names: One name for each entry in the clocks property, the | 36 | - clock-names: One name for each entry in the clocks property, the |
| 37 | first one should be "stmmaceth". | 37 | first one should be "stmmaceth". |
| 38 | 38 | ||
| 39 | Examples: | 39 | Examples: |
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-st.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-st.txt
index 4bd5be0e5e7d..26bcb18f4e60 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-st.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-st.txt
| @@ -83,7 +83,7 @@ Example: | |||
| 83 | reg = <0xfe61f080 0x4>; | 83 | reg = <0xfe61f080 0x4>; |
| 84 | reg-names = "irqmux"; | 84 | reg-names = "irqmux"; |
| 85 | interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>; | 85 | interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>; |
| 86 | interrupts-names = "irqmux"; | 86 | interrupt-names = "irqmux"; |
| 87 | ranges = <0 0xfe610000 0x5000>; | 87 | ranges = <0 0xfe610000 0x5000>; |
| 88 | 88 | ||
| 89 | PIO0: gpio@fe610000 { | 89 | PIO0: gpio@fe610000 { |
| @@ -165,7 +165,7 @@ sdhci0:sdhci@fe810000{ | |||
| 165 | interrupt-parent = <&PIO3>; | 165 | interrupt-parent = <&PIO3>; |
| 166 | #interrupt-cells = <2>; | 166 | #interrupt-cells = <2>; |
| 167 | interrupts = <3 IRQ_TYPE_LEVEL_HIGH>; /* Interrupt line via PIO3-3 */ | 167 | interrupts = <3 IRQ_TYPE_LEVEL_HIGH>; /* Interrupt line via PIO3-3 */ |
| 168 | interrupts-names = "card-detect"; | 168 | interrupt-names = "card-detect"; |
| 169 | pinctrl-names = "default"; | 169 | pinctrl-names = "default"; |
| 170 | pinctrl-0 = <&pinctrl_mmc>; | 170 | pinctrl-0 = <&pinctrl_mmc>; |
| 171 | }; | 171 | }; |
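These renames are not cosmetic: the kernel's lookup helpers match resources via the standard clock-names and interrupt-names properties, which is why the misspelled clocks-names/interrupts-names variants are corrected here. A hedged sketch of the consumer side (probe function and lookup names only illustrative):

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            /* Looked up via the node's "clock-names" strings. */
            struct clk *clk = devm_clk_get(&pdev->dev, "stmmaceth");
            /* Looked up via the node's "interrupt-names" strings. */
            int irq = platform_get_irq_byname(pdev, "irqmux");

            if (IS_ERR(clk))
                    return PTR_ERR(clk);
            if (irq < 0)
                    return irq;
            return 0;
    }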
diff --git a/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt b/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt
index 569b26c4a81e..60ca07996458 100644
--- a/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt
+++ b/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt
| @@ -47,7 +47,7 @@ mcasp0: mcasp0@1d00000 { | |||
| 47 | reg = <0x100000 0x3000>; | 47 | reg = <0x100000 0x3000>; |
| 48 | reg-names = "mpu"; | 48 | reg-names = "mpu"; |
| 49 | interrupts = <82>, <83>; | 49 | interrupts = <82>, <83>; |
| 50 | interrupts-names = "tx", "rx"; | 50 | interrupt-names = "tx", "rx"; |
| 51 | op-mode = <0>; /* MCASP_IIS_MODE */ | 51 | op-mode = <0>; /* MCASP_IIS_MODE */ |
| 52 | tdm-slots = <2>; | 52 | tdm-slots = <2>; |
| 53 | serial-dir = < | 53 | serial-dir = < |
diff --git a/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt b/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
index 74c66dee3e14..eff12be5e789 100644
--- a/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
+++ b/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
| @@ -13,6 +13,9 @@ Required properties: | |||
| 13 | "ti,tlv320aic3111" - TLV320AIC3111 (stereo speaker amp, MiniDSP) | 13 | "ti,tlv320aic3111" - TLV320AIC3111 (stereo speaker amp, MiniDSP) |
| 14 | 14 | ||
| 15 | - reg - <int> - I2C slave address | 15 | - reg - <int> - I2C slave address |
| 16 | - HPVDD-supply, SPRVDD-supply, SPLVDD-supply, AVDD-supply, IOVDD-supply, | ||
| 17 | DVDD-supply : power supplies for the device as covered in | ||
| 18 | Documentation/devicetree/bindings/regulator/regulator.txt | ||
| 16 | 19 | ||
| 17 | 20 | ||
| 18 | Optional properties: | 21 | Optional properties: |
| @@ -24,9 +27,6 @@ Optional properties: | |||
| 24 | 3 or MICBIAS_AVDD - MICBIAS output is connected to AVDD | 27 | 3 or MICBIAS_AVDD - MICBIAS output is connected to AVDD |
| 25 | If this node is not mentioned or if the value is unknown, then | 28 | If this node is not mentioned or if the value is unknown, then |
| 26 | micbias is set to 2.0V. | 29 | micbias is set to 2.0V. |
| 27 | - HPVDD-supply, SPRVDD-supply, SPLVDD-supply, AVDD-supply, IOVDD-supply, | ||
| 28 | DVDD-supply : power supplies for the device as covered in | ||
| 29 | Documentation/devicetree/bindings/regulator/regulator.txt | ||
| 30 | 30 | ||
| 31 | CODEC output pins: | 31 | CODEC output pins: |
| 32 | * HPL | 32 | * HPL |
diff --git a/MAINTAINERS b/MAINTAINERS
index e67ea2442041..6bef70b614c9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
| @@ -3485,6 +3485,12 @@ S: Maintained | |||
| 3485 | F: drivers/extcon/ | 3485 | F: drivers/extcon/ |
| 3486 | F: Documentation/extcon/ | 3486 | F: Documentation/extcon/ |
| 3487 | 3487 | ||
| 3488 | EXYNOS DP DRIVER | ||
| 3489 | M: Jingoo Han <jg1.han@samsung.com> | ||
| 3490 | L: dri-devel@lists.freedesktop.org | ||
| 3491 | S: Maintained | ||
| 3492 | F: drivers/gpu/drm/exynos/exynos_dp* | ||
| 3493 | |||
| 3488 | EXYNOS MIPI DISPLAY DRIVERS | 3494 | EXYNOS MIPI DISPLAY DRIVERS |
| 3489 | M: Inki Dae <inki.dae@samsung.com> | 3495 | M: Inki Dae <inki.dae@samsung.com> |
| 3490 | M: Donghwa Lee <dh09.lee@samsung.com> | 3496 | M: Donghwa Lee <dh09.lee@samsung.com> |
| @@ -5108,14 +5114,19 @@ F: drivers/s390/kvm/ | |||
| 5108 | 5114 | ||
| 5109 | KERNEL VIRTUAL MACHINE (KVM) FOR ARM | 5115 | KERNEL VIRTUAL MACHINE (KVM) FOR ARM |
| 5110 | M: Christoffer Dall <christoffer.dall@linaro.org> | 5116 | M: Christoffer Dall <christoffer.dall@linaro.org> |
| 5117 | M: Marc Zyngier <marc.zyngier@arm.com> | ||
| 5118 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | ||
| 5111 | L: kvmarm@lists.cs.columbia.edu | 5119 | L: kvmarm@lists.cs.columbia.edu |
| 5112 | W: http://systems.cs.columbia.edu/projects/kvm-arm | 5120 | W: http://systems.cs.columbia.edu/projects/kvm-arm |
| 5113 | S: Supported | 5121 | S: Supported |
| 5114 | F: arch/arm/include/uapi/asm/kvm* | 5122 | F: arch/arm/include/uapi/asm/kvm* |
| 5115 | F: arch/arm/include/asm/kvm* | 5123 | F: arch/arm/include/asm/kvm* |
| 5116 | F: arch/arm/kvm/ | 5124 | F: arch/arm/kvm/ |
| 5125 | F: virt/kvm/arm/ | ||
| 5126 | F: include/kvm/arm_* | ||
| 5117 | 5127 | ||
| 5118 | KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64) | 5128 | KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64) |
| 5129 | M: Christoffer Dall <christoffer.dall@linaro.org> | ||
| 5119 | M: Marc Zyngier <marc.zyngier@arm.com> | 5130 | M: Marc Zyngier <marc.zyngier@arm.com> |
| 5120 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 5131 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
| 5121 | L: kvmarm@lists.cs.columbia.edu | 5132 | L: kvmarm@lists.cs.columbia.edu |
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index 819dd5f7eb05..29b82adbf0b4 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
| @@ -614,11 +614,13 @@ resume_user_mode_begin: | |||
| 614 | 614 | ||
| 615 | resume_kernel_mode: | 615 | resume_kernel_mode: |
| 616 | 616 | ||
| 617 | #ifdef CONFIG_PREEMPT | 617 | ; Disable Interrupts from this point on |
| 618 | 618 | ; CONFIG_PREEMPT: This is a must for preempt_schedule_irq() | |
| 619 | ; This is a must for preempt_schedule_irq() | 619 | ; !CONFIG_PREEMPT: To ensure restore_regs is intr safe |
| 620 | IRQ_DISABLE r9 | 620 | IRQ_DISABLE r9 |
| 621 | 621 | ||
| 622 | #ifdef CONFIG_PREEMPT | ||
| 623 | |||
| 622 | ; Can't preempt if preemption disabled | 624 | ; Can't preempt if preemption disabled |
| 623 | GET_CURR_THR_INFO_FROM_SP r10 | 625 | GET_CURR_THR_INFO_FROM_SP r10 |
| 624 | ld r8, [r10, THREAD_INFO_PREEMPT_COUNT] | 626 | ld r8, [r10, THREAD_INFO_PREEMPT_COUNT] |
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 07f283c20eb1..cb6811e5ae5a 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
| @@ -802,7 +802,7 @@ | |||
| 802 | <0x46000000 0x400000>; | 802 | <0x46000000 0x400000>; |
| 803 | reg-names = "mpu", "dat"; | 803 | reg-names = "mpu", "dat"; |
| 804 | interrupts = <80>, <81>; | 804 | interrupts = <80>, <81>; |
| 805 | interrupts-names = "tx", "rx"; | 805 | interrupt-names = "tx", "rx"; |
| 806 | status = "disabled"; | 806 | status = "disabled"; |
| 807 | dmas = <&edma 8>, | 807 | dmas = <&edma 8>, |
| 808 | <&edma 9>; | 808 | <&edma 9>; |
| @@ -816,7 +816,7 @@ | |||
| 816 | <0x46400000 0x400000>; | 816 | <0x46400000 0x400000>; |
| 817 | reg-names = "mpu", "dat"; | 817 | reg-names = "mpu", "dat"; |
| 818 | interrupts = <82>, <83>; | 818 | interrupts = <82>, <83>; |
| 819 | interrupts-names = "tx", "rx"; | 819 | interrupt-names = "tx", "rx"; |
| 820 | status = "disabled"; | 820 | status = "disabled"; |
| 821 | dmas = <&edma 10>, | 821 | dmas = <&edma 10>, |
| 822 | <&edma 11>; | 822 | <&edma 11>; |
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index 36d523a26831..d1f8707ff1df 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
| @@ -691,7 +691,7 @@ | |||
| 691 | <0x46000000 0x400000>; | 691 | <0x46000000 0x400000>; |
| 692 | reg-names = "mpu", "dat"; | 692 | reg-names = "mpu", "dat"; |
| 693 | interrupts = <80>, <81>; | 693 | interrupts = <80>, <81>; |
| 694 | interrupts-names = "tx", "rx"; | 694 | interrupt-names = "tx", "rx"; |
| 695 | status = "disabled"; | 695 | status = "disabled"; |
| 696 | dmas = <&edma 8>, | 696 | dmas = <&edma 8>, |
| 697 | <&edma 9>; | 697 | <&edma 9>; |
| @@ -705,7 +705,7 @@ | |||
| 705 | <0x46400000 0x400000>; | 705 | <0x46400000 0x400000>; |
| 706 | reg-names = "mpu", "dat"; | 706 | reg-names = "mpu", "dat"; |
| 707 | interrupts = <82>, <83>; | 707 | interrupts = <82>, <83>; |
| 708 | interrupts-names = "tx", "rx"; | 708 | interrupt-names = "tx", "rx"; |
| 709 | status = "disabled"; | 709 | status = "disabled"; |
| 710 | dmas = <&edma 10>, | 710 | dmas = <&edma 10>, |
| 711 | <&edma 11>; | 711 | <&edma 11>; |
diff --git a/arch/arm/boot/dts/stih415-pinctrl.dtsi b/arch/arm/boot/dts/stih415-pinctrl.dtsi
index f09fb10a3791..81df870e5ee6 100644
--- a/arch/arm/boot/dts/stih415-pinctrl.dtsi
+++ b/arch/arm/boot/dts/stih415-pinctrl.dtsi
| @@ -49,7 +49,7 @@ | |||
| 49 | reg = <0xfe61f080 0x4>; | 49 | reg = <0xfe61f080 0x4>; |
| 50 | reg-names = "irqmux"; | 50 | reg-names = "irqmux"; |
| 51 | interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>; | 51 | interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>; |
| 52 | interrupts-names = "irqmux"; | 52 | interrupt-names = "irqmux"; |
| 53 | ranges = <0 0xfe610000 0x5000>; | 53 | ranges = <0 0xfe610000 0x5000>; |
| 54 | 54 | ||
| 55 | PIO0: gpio@fe610000 { | 55 | PIO0: gpio@fe610000 { |
| @@ -187,7 +187,7 @@ | |||
| 187 | reg = <0xfee0f080 0x4>; | 187 | reg = <0xfee0f080 0x4>; |
| 188 | reg-names = "irqmux"; | 188 | reg-names = "irqmux"; |
| 189 | interrupts = <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>; | 189 | interrupts = <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>; |
| 190 | interrupts-names = "irqmux"; | 190 | interrupt-names = "irqmux"; |
| 191 | ranges = <0 0xfee00000 0x8000>; | 191 | ranges = <0 0xfee00000 0x8000>; |
| 192 | 192 | ||
| 193 | PIO5: gpio@fee00000 { | 193 | PIO5: gpio@fee00000 { |
| @@ -282,7 +282,7 @@ | |||
| 282 | reg = <0xfe82f080 0x4>; | 282 | reg = <0xfe82f080 0x4>; |
| 283 | reg-names = "irqmux"; | 283 | reg-names = "irqmux"; |
| 284 | interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>; | 284 | interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>; |
| 285 | interrupts-names = "irqmux"; | 285 | interrupt-names = "irqmux"; |
| 286 | ranges = <0 0xfe820000 0x8000>; | 286 | ranges = <0 0xfe820000 0x8000>; |
| 287 | 287 | ||
| 288 | PIO13: gpio@fe820000 { | 288 | PIO13: gpio@fe820000 { |
| @@ -423,7 +423,7 @@ | |||
| 423 | reg = <0xfd6bf080 0x4>; | 423 | reg = <0xfd6bf080 0x4>; |
| 424 | reg-names = "irqmux"; | 424 | reg-names = "irqmux"; |
| 425 | interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>; | 425 | interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>; |
| 426 | interrupts-names = "irqmux"; | 426 | interrupt-names = "irqmux"; |
| 427 | ranges = <0 0xfd6b0000 0x3000>; | 427 | ranges = <0 0xfd6b0000 0x3000>; |
| 428 | 428 | ||
| 429 | PIO100: gpio@fd6b0000 { | 429 | PIO100: gpio@fd6b0000 { |
| @@ -460,7 +460,7 @@ | |||
| 460 | reg = <0xfd33f080 0x4>; | 460 | reg = <0xfd33f080 0x4>; |
| 461 | reg-names = "irqmux"; | 461 | reg-names = "irqmux"; |
| 462 | interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>; | 462 | interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>; |
| 463 | interrupts-names = "irqmux"; | 463 | interrupt-names = "irqmux"; |
| 464 | ranges = <0 0xfd330000 0x5000>; | 464 | ranges = <0 0xfd330000 0x5000>; |
| 465 | 465 | ||
| 466 | PIO103: gpio@fd330000 { | 466 | PIO103: gpio@fd330000 { |
diff --git a/arch/arm/boot/dts/stih416-pinctrl.dtsi b/arch/arm/boot/dts/stih416-pinctrl.dtsi
index aeea304086eb..250d5ecc951e 100644
--- a/arch/arm/boot/dts/stih416-pinctrl.dtsi
+++ b/arch/arm/boot/dts/stih416-pinctrl.dtsi
| @@ -53,7 +53,7 @@ | |||
| 53 | reg = <0xfe61f080 0x4>; | 53 | reg = <0xfe61f080 0x4>; |
| 54 | reg-names = "irqmux"; | 54 | reg-names = "irqmux"; |
| 55 | interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>; | 55 | interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>; |
| 56 | interrupts-names = "irqmux"; | 56 | interrupt-names = "irqmux"; |
| 57 | ranges = <0 0xfe610000 0x6000>; | 57 | ranges = <0 0xfe610000 0x6000>; |
| 58 | 58 | ||
| 59 | PIO0: gpio@fe610000 { | 59 | PIO0: gpio@fe610000 { |
| @@ -201,7 +201,7 @@ | |||
| 201 | reg = <0xfee0f080 0x4>; | 201 | reg = <0xfee0f080 0x4>; |
| 202 | reg-names = "irqmux"; | 202 | reg-names = "irqmux"; |
| 203 | interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>; | 203 | interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>; |
| 204 | interrupts-names = "irqmux"; | 204 | interrupt-names = "irqmux"; |
| 205 | ranges = <0 0xfee00000 0x10000>; | 205 | ranges = <0 0xfee00000 0x10000>; |
| 206 | 206 | ||
| 207 | PIO5: gpio@fee00000 { | 207 | PIO5: gpio@fee00000 { |
| @@ -333,7 +333,7 @@ | |||
| 333 | reg = <0xfe82f080 0x4>; | 333 | reg = <0xfe82f080 0x4>; |
| 334 | reg-names = "irqmux"; | 334 | reg-names = "irqmux"; |
| 335 | interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>; | 335 | interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>; |
| 336 | interrupts-names = "irqmux"; | 336 | interrupt-names = "irqmux"; |
| 337 | ranges = <0 0xfe820000 0x6000>; | 337 | ranges = <0 0xfe820000 0x6000>; |
| 338 | 338 | ||
| 339 | PIO13: gpio@fe820000 { | 339 | PIO13: gpio@fe820000 { |
| @@ -461,7 +461,7 @@ | |||
| 461 | reg = <0xfd6bf080 0x4>; | 461 | reg = <0xfd6bf080 0x4>; |
| 462 | reg-names = "irqmux"; | 462 | reg-names = "irqmux"; |
| 463 | interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>; | 463 | interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>; |
| 464 | interrupts-names = "irqmux"; | 464 | interrupt-names = "irqmux"; |
| 465 | ranges = <0 0xfd6b0000 0x3000>; | 465 | ranges = <0 0xfd6b0000 0x3000>; |
| 466 | 466 | ||
| 467 | PIO100: gpio@fd6b0000 { | 467 | PIO100: gpio@fd6b0000 { |
| @@ -498,7 +498,7 @@ | |||
| 498 | reg = <0xfd33f080 0x4>; | 498 | reg = <0xfd33f080 0x4>; |
| 499 | reg-names = "irqmux"; | 499 | reg-names = "irqmux"; |
| 500 | interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>; | 500 | interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>; |
| 501 | interrupts-names = "irqmux"; | 501 | interrupt-names = "irqmux"; |
| 502 | ranges = <0 0xfd330000 0x5000>; | 502 | ranges = <0 0xfd330000 0x5000>; |
| 503 | 503 | ||
| 504 | PIO103: gpio@fd330000 { | 504 | PIO103: gpio@fd330000 { |
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index 466bd299b1a8..4be5bb150bdd 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
| @@ -23,7 +23,7 @@ config KVM | |||
| 23 | select HAVE_KVM_CPU_RELAX_INTERCEPT | 23 | select HAVE_KVM_CPU_RELAX_INTERCEPT |
| 24 | select KVM_MMIO | 24 | select KVM_MMIO |
| 25 | select KVM_ARM_HOST | 25 | select KVM_ARM_HOST |
| 26 | depends on ARM_VIRT_EXT && ARM_LPAE | 26 | depends on ARM_VIRT_EXT && ARM_LPAE && !CPU_BIG_ENDIAN |
| 27 | ---help--- | 27 | ---help--- |
| 28 | Support hosting virtualized guest machines. You will also | 28 | Support hosting virtualized guest machines. You will also |
| 29 | need to select one or more of the processor modules below. | 29 | need to select one or more of the processor modules below. |
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 80bb1e6c2c29..16f804938b8f 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
| @@ -42,6 +42,8 @@ static unsigned long hyp_idmap_start; | |||
| 42 | static unsigned long hyp_idmap_end; | 42 | static unsigned long hyp_idmap_end; |
| 43 | static phys_addr_t hyp_idmap_vector; | 43 | static phys_addr_t hyp_idmap_vector; |
| 44 | 44 | ||
| 45 | #define pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t)) | ||
| 46 | |||
| 45 | #define kvm_pmd_huge(_x) (pmd_huge(_x) || pmd_trans_huge(_x)) | 47 | #define kvm_pmd_huge(_x) (pmd_huge(_x) || pmd_trans_huge(_x)) |
| 46 | 48 | ||
| 47 | static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) | 49 | static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) |
| @@ -293,14 +295,14 @@ void free_boot_hyp_pgd(void) | |||
| 293 | if (boot_hyp_pgd) { | 295 | if (boot_hyp_pgd) { |
| 294 | unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE); | 296 | unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE); |
| 295 | unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); | 297 | unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); |
| 296 | kfree(boot_hyp_pgd); | 298 | free_pages((unsigned long)boot_hyp_pgd, pgd_order); |
| 297 | boot_hyp_pgd = NULL; | 299 | boot_hyp_pgd = NULL; |
| 298 | } | 300 | } |
| 299 | 301 | ||
| 300 | if (hyp_pgd) | 302 | if (hyp_pgd) |
| 301 | unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); | 303 | unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); |
| 302 | 304 | ||
| 303 | kfree(init_bounce_page); | 305 | free_page((unsigned long)init_bounce_page); |
| 304 | init_bounce_page = NULL; | 306 | init_bounce_page = NULL; |
| 305 | 307 | ||
| 306 | mutex_unlock(&kvm_hyp_pgd_mutex); | 308 | mutex_unlock(&kvm_hyp_pgd_mutex); |
| @@ -330,7 +332,7 @@ void free_hyp_pgds(void) | |||
| 330 | for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE) | 332 | for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE) |
| 331 | unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE); | 333 | unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE); |
| 332 | 334 | ||
| 333 | kfree(hyp_pgd); | 335 | free_pages((unsigned long)hyp_pgd, pgd_order); |
| 334 | hyp_pgd = NULL; | 336 | hyp_pgd = NULL; |
| 335 | } | 337 | } |
| 336 | 338 | ||
| @@ -1024,7 +1026,7 @@ int kvm_mmu_init(void) | |||
| 1024 | size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start; | 1026 | size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start; |
| 1025 | phys_addr_t phys_base; | 1027 | phys_addr_t phys_base; |
| 1026 | 1028 | ||
| 1027 | init_bounce_page = kmalloc(PAGE_SIZE, GFP_KERNEL); | 1029 | init_bounce_page = (void *)__get_free_page(GFP_KERNEL); |
| 1028 | if (!init_bounce_page) { | 1030 | if (!init_bounce_page) { |
| 1029 | kvm_err("Couldn't allocate HYP init bounce page\n"); | 1031 | kvm_err("Couldn't allocate HYP init bounce page\n"); |
| 1030 | err = -ENOMEM; | 1032 | err = -ENOMEM; |
| @@ -1050,8 +1052,9 @@ int kvm_mmu_init(void) | |||
| 1050 | (unsigned long)phys_base); | 1052 | (unsigned long)phys_base); |
| 1051 | } | 1053 | } |
| 1052 | 1054 | ||
| 1053 | hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL); | 1055 | hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order); |
| 1054 | boot_hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL); | 1056 | boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order); |
| 1057 | |||
| 1055 | if (!hyp_pgd || !boot_hyp_pgd) { | 1058 | if (!hyp_pgd || !boot_hyp_pgd) { |
| 1056 | kvm_err("Hyp mode PGD not allocated\n"); | 1059 | kvm_err("Hyp mode PGD not allocated\n"); |
| 1057 | err = -ENOMEM; | 1060 | err = -ENOMEM; |
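The kmalloc()/kzalloc() calls above are replaced because they do not guarantee page alignment (slab debugging in particular can shift objects), while both the init bounce page and the Hyp PGD must be naturally aligned for the hardware. A minimal sketch of the page-allocator pairing, with illustrative helper names:

    #include <linux/mm.h>
    #include <asm/pgtable.h>

    #define pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))

    static pgd_t *example_alloc_hyp_pgd(void)
    {
            /* Page allocator => page-aligned, power-of-two sized block. */
            return (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order);
    }

    static void example_free_hyp_pgd(pgd_t *pgd)
    {
            free_pages((unsigned long)pgd, pgd_order);
    }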
diff --git a/arch/hexagon/include/asm/barrier.h b/arch/hexagon/include/asm/barrier.h
deleted file mode 100644
index 4e863daea25b..000000000000
--- a/arch/hexagon/include/asm/barrier.h
+++ /dev/null
| @@ -1,37 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Memory barrier definitions for the Hexagon architecture | ||
| 3 | * | ||
| 4 | * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 and | ||
| 8 | * only version 2 as published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, | ||
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 13 | * GNU General Public License for more details. | ||
| 14 | * | ||
| 15 | * You should have received a copy of the GNU General Public License | ||
| 16 | * along with this program; if not, write to the Free Software | ||
| 17 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
| 18 | * 02110-1301, USA. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #ifndef _ASM_BARRIER_H | ||
| 22 | #define _ASM_BARRIER_H | ||
| 23 | |||
| 24 | #define rmb() barrier() | ||
| 25 | #define read_barrier_depends() barrier() | ||
| 26 | #define wmb() barrier() | ||
| 27 | #define mb() barrier() | ||
| 28 | #define smp_rmb() barrier() | ||
| 29 | #define smp_read_barrier_depends() barrier() | ||
| 30 | #define smp_wmb() barrier() | ||
| 31 | #define smp_mb() barrier() | ||
| 32 | |||
| 33 | /* Set a value and use a memory barrier. Used by the scheduler somewhere. */ | ||
| 34 | #define set_mb(var, value) \ | ||
| 35 | do { var = value; mb(); } while (0) | ||
| 36 | |||
| 37 | #endif /* _ASM_BARRIER_H */ | ||
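The deleted header only duplicated the generic fallbacks, so hexagon can pick up asm-generic/barrier.h instead (via a generic-y entry in the arch Kbuild, not shown in this hunk). Roughly sketched, and not quoted verbatim, the generic header follows this override pattern:

    /* A definition is only supplied if the architecture has not
     * already provided a stronger one of its own. */
    #ifndef mb
    #define mb()    barrier()
    #endif

    #ifndef rmb
    #define rmb()   mb()
    #endif

    #ifndef smp_mb
    #ifdef CONFIG_SMP
    #define smp_mb()        mb()
    #else
    #define smp_mb()        barrier()
    #endif
    #endif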
diff --git a/arch/parisc/include/uapi/asm/Kbuild b/arch/parisc/include/uapi/asm/Kbuild
index a580642555b6..348356c99514 100644
--- a/arch/parisc/include/uapi/asm/Kbuild
+++ b/arch/parisc/include/uapi/asm/Kbuild
| @@ -1,6 +1,8 @@ | |||
| 1 | # UAPI Header export list | 1 | # UAPI Header export list |
| 2 | include include/uapi/asm-generic/Kbuild.asm | 2 | include include/uapi/asm-generic/Kbuild.asm |
| 3 | 3 | ||
| 4 | generic-y += resource.h | ||
| 5 | |||
| 4 | header-y += bitsperlong.h | 6 | header-y += bitsperlong.h |
| 5 | header-y += byteorder.h | 7 | header-y += byteorder.h |
| 6 | header-y += errno.h | 8 | header-y += errno.h |
| @@ -13,7 +15,6 @@ header-y += msgbuf.h | |||
| 13 | header-y += pdc.h | 15 | header-y += pdc.h |
| 14 | header-y += posix_types.h | 16 | header-y += posix_types.h |
| 15 | header-y += ptrace.h | 17 | header-y += ptrace.h |
| 16 | header-y += resource.h | ||
| 17 | header-y += sembuf.h | 18 | header-y += sembuf.h |
| 18 | header-y += setup.h | 19 | header-y += setup.h |
| 19 | header-y += shmbuf.h | 20 | header-y += shmbuf.h |
diff --git a/arch/parisc/include/uapi/asm/resource.h b/arch/parisc/include/uapi/asm/resource.h
deleted file mode 100644
index 8b06343b62ed..000000000000
--- a/arch/parisc/include/uapi/asm/resource.h
+++ /dev/null
| @@ -1,7 +0,0 @@ | |||
| 1 | #ifndef _ASM_PARISC_RESOURCE_H | ||
| 2 | #define _ASM_PARISC_RESOURCE_H | ||
| 3 | |||
| 4 | #define _STK_LIM_MAX 10 * _STK_LIM | ||
| 5 | #include <asm-generic/resource.h> | ||
| 6 | |||
| 7 | #endif | ||
diff --git a/arch/powerpc/boot/main.c b/arch/powerpc/boot/main.c
index a28f02165e97..d367a0aece2a 100644
--- a/arch/powerpc/boot/main.c
+++ b/arch/powerpc/boot/main.c
| @@ -139,18 +139,18 @@ static struct addr_range prep_initrd(struct addr_range vmlinux, void *chosen, | |||
| 139 | * edit the command line passed to vmlinux (by setting /chosen/bootargs). | 139 | * edit the command line passed to vmlinux (by setting /chosen/bootargs). |
| 140 | * The buffer is put in it's own section so that tools may locate it easier. | 140 | * The buffer is put in it's own section so that tools may locate it easier. |
| 141 | */ | 141 | */ |
| 142 | static char cmdline[COMMAND_LINE_SIZE] | 142 | static char cmdline[BOOT_COMMAND_LINE_SIZE] |
| 143 | __attribute__((__section__("__builtin_cmdline"))); | 143 | __attribute__((__section__("__builtin_cmdline"))); |
| 144 | 144 | ||
| 145 | static void prep_cmdline(void *chosen) | 145 | static void prep_cmdline(void *chosen) |
| 146 | { | 146 | { |
| 147 | if (cmdline[0] == '\0') | 147 | if (cmdline[0] == '\0') |
| 148 | getprop(chosen, "bootargs", cmdline, COMMAND_LINE_SIZE-1); | 148 | getprop(chosen, "bootargs", cmdline, BOOT_COMMAND_LINE_SIZE-1); |
| 149 | 149 | ||
| 150 | printf("\n\rLinux/PowerPC load: %s", cmdline); | 150 | printf("\n\rLinux/PowerPC load: %s", cmdline); |
| 151 | /* If possible, edit the command line */ | 151 | /* If possible, edit the command line */ |
| 152 | if (console_ops.edit_cmdline) | 152 | if (console_ops.edit_cmdline) |
| 153 | console_ops.edit_cmdline(cmdline, COMMAND_LINE_SIZE); | 153 | console_ops.edit_cmdline(cmdline, BOOT_COMMAND_LINE_SIZE); |
| 154 | printf("\n\r"); | 154 | printf("\n\r"); |
| 155 | 155 | ||
| 156 | /* Put the command line back into the devtree for the kernel */ | 156 | /* Put the command line back into the devtree for the kernel */ |
| @@ -174,7 +174,7 @@ void start(void) | |||
| 174 | * built-in command line wasn't set by an external tool */ | 174 | * built-in command line wasn't set by an external tool */ |
| 175 | if ((loader_info.cmdline_len > 0) && (cmdline[0] == '\0')) | 175 | if ((loader_info.cmdline_len > 0) && (cmdline[0] == '\0')) |
| 176 | memmove(cmdline, loader_info.cmdline, | 176 | memmove(cmdline, loader_info.cmdline, |
| 177 | min(loader_info.cmdline_len, COMMAND_LINE_SIZE-1)); | 177 | min(loader_info.cmdline_len, BOOT_COMMAND_LINE_SIZE-1)); |
| 178 | 178 | ||
| 179 | if (console_ops.open && (console_ops.open() < 0)) | 179 | if (console_ops.open && (console_ops.open() < 0)) |
| 180 | exit(); | 180 | exit(); |
diff --git a/arch/powerpc/boot/ops.h b/arch/powerpc/boot/ops.h
index b3218ce451bb..8aad3c55aeda 100644
--- a/arch/powerpc/boot/ops.h
+++ b/arch/powerpc/boot/ops.h
| @@ -15,7 +15,7 @@ | |||
| 15 | #include "types.h" | 15 | #include "types.h" |
| 16 | #include "string.h" | 16 | #include "string.h" |
| 17 | 17 | ||
| 18 | #define COMMAND_LINE_SIZE 512 | 18 | #define BOOT_COMMAND_LINE_SIZE 2048 |
| 19 | #define MAX_PATH_LEN 256 | 19 | #define MAX_PATH_LEN 256 |
| 20 | #define MAX_PROP_LEN 256 /* What should this be? */ | 20 | #define MAX_PROP_LEN 256 /* What should this be? */ |
| 21 | 21 | ||
diff --git a/arch/powerpc/boot/ps3.c b/arch/powerpc/boot/ps3.c
index 9954d98871d0..4ec2d86d3c50 100644
--- a/arch/powerpc/boot/ps3.c
+++ b/arch/powerpc/boot/ps3.c
| @@ -47,13 +47,13 @@ BSS_STACK(4096); | |||
| 47 | * The buffer is put in it's own section so that tools may locate it easier. | 47 | * The buffer is put in it's own section so that tools may locate it easier. |
| 48 | */ | 48 | */ |
| 49 | 49 | ||
| 50 | static char cmdline[COMMAND_LINE_SIZE] | 50 | static char cmdline[BOOT_COMMAND_LINE_SIZE] |
| 51 | __attribute__((__section__("__builtin_cmdline"))); | 51 | __attribute__((__section__("__builtin_cmdline"))); |
| 52 | 52 | ||
| 53 | static void prep_cmdline(void *chosen) | 53 | static void prep_cmdline(void *chosen) |
| 54 | { | 54 | { |
| 55 | if (cmdline[0] == '\0') | 55 | if (cmdline[0] == '\0') |
| 56 | getprop(chosen, "bootargs", cmdline, COMMAND_LINE_SIZE-1); | 56 | getprop(chosen, "bootargs", cmdline, BOOT_COMMAND_LINE_SIZE-1); |
| 57 | else | 57 | else |
| 58 | setprop_str(chosen, "bootargs", cmdline); | 58 | setprop_str(chosen, "bootargs", cmdline); |
| 59 | 59 | ||
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index a2efdaa020b0..66ad7a74116f 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
| @@ -41,14 +41,14 @@ struct opal_takeover_args { | |||
| 41 | * size except the last one in the list to be as well. | 41 | * size except the last one in the list to be as well. |
| 42 | */ | 42 | */ |
| 43 | struct opal_sg_entry { | 43 | struct opal_sg_entry { |
| 44 | void *data; | 44 | __be64 data; |
| 45 | long length; | 45 | __be64 length; |
| 46 | }; | 46 | }; |
| 47 | 47 | ||
| 48 | /* sg list */ | 48 | /* SG list */ |
| 49 | struct opal_sg_list { | 49 | struct opal_sg_list { |
| 50 | unsigned long num_entries; | 50 | __be64 length; |
| 51 | struct opal_sg_list *next; | 51 | __be64 next; |
| 52 | struct opal_sg_entry entry[]; | 52 | struct opal_sg_entry entry[]; |
| 53 | }; | 53 | }; |
| 54 | 54 | ||
| @@ -858,8 +858,8 @@ int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type, | |||
| 858 | int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type, | 858 | int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type, |
| 859 | uint32_t addr, __be32 *data, uint32_t sz); | 859 | uint32_t addr, __be32 *data, uint32_t sz); |
| 860 | 860 | ||
| 861 | int64_t opal_read_elog(uint64_t buffer, size_t size, uint64_t log_id); | 861 | int64_t opal_read_elog(uint64_t buffer, uint64_t size, uint64_t log_id); |
| 862 | int64_t opal_get_elog_size(uint64_t *log_id, size_t *size, uint64_t *elog_type); | 862 | int64_t opal_get_elog_size(__be64 *log_id, __be64 *size, __be64 *elog_type); |
| 863 | int64_t opal_write_elog(uint64_t buffer, uint64_t size, uint64_t offset); | 863 | int64_t opal_write_elog(uint64_t buffer, uint64_t size, uint64_t offset); |
| 864 | int64_t opal_send_ack_elog(uint64_t log_id); | 864 | int64_t opal_send_ack_elog(uint64_t log_id); |
| 865 | void opal_resend_pending_logs(void); | 865 | void opal_resend_pending_logs(void); |
| @@ -868,23 +868,24 @@ int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result); | |||
| 868 | int64_t opal_manage_flash(uint8_t op); | 868 | int64_t opal_manage_flash(uint8_t op); |
| 869 | int64_t opal_update_flash(uint64_t blk_list); | 869 | int64_t opal_update_flash(uint64_t blk_list); |
| 870 | int64_t opal_dump_init(uint8_t dump_type); | 870 | int64_t opal_dump_init(uint8_t dump_type); |
| 871 | int64_t opal_dump_info(uint32_t *dump_id, uint32_t *dump_size); | 871 | int64_t opal_dump_info(__be32 *dump_id, __be32 *dump_size); |
| 872 | int64_t opal_dump_info2(uint32_t *dump_id, uint32_t *dump_size, uint32_t *dump_type); | 872 | int64_t opal_dump_info2(__be32 *dump_id, __be32 *dump_size, __be32 *dump_type); |
| 873 | int64_t opal_dump_read(uint32_t dump_id, uint64_t buffer); | 873 | int64_t opal_dump_read(uint32_t dump_id, uint64_t buffer); |
| 874 | int64_t opal_dump_ack(uint32_t dump_id); | 874 | int64_t opal_dump_ack(uint32_t dump_id); |
| 875 | int64_t opal_dump_resend_notification(void); | 875 | int64_t opal_dump_resend_notification(void); |
| 876 | 876 | ||
| 877 | int64_t opal_get_msg(uint64_t buffer, size_t size); | 877 | int64_t opal_get_msg(uint64_t buffer, uint64_t size); |
| 878 | int64_t opal_check_completion(uint64_t buffer, size_t size, uint64_t token); | 878 | int64_t opal_check_completion(uint64_t buffer, uint64_t size, uint64_t token); |
| 879 | int64_t opal_sync_host_reboot(void); | 879 | int64_t opal_sync_host_reboot(void); |
| 880 | int64_t opal_get_param(uint64_t token, uint32_t param_id, uint64_t buffer, | 880 | int64_t opal_get_param(uint64_t token, uint32_t param_id, uint64_t buffer, |
| 881 | size_t length); | 881 | uint64_t length); |
| 882 | int64_t opal_set_param(uint64_t token, uint32_t param_id, uint64_t buffer, | 882 | int64_t opal_set_param(uint64_t token, uint32_t param_id, uint64_t buffer, |
| 883 | size_t length); | 883 | uint64_t length); |
| 884 | int64_t opal_sensor_read(uint32_t sensor_hndl, int token, __be32 *sensor_data); | 884 | int64_t opal_sensor_read(uint32_t sensor_hndl, int token, __be32 *sensor_data); |
| 885 | 885 | ||
| 886 | /* Internal functions */ | 886 | /* Internal functions */ |
| 887 | extern int early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data); | 887 | extern int early_init_dt_scan_opal(unsigned long node, const char *uname, |
| 888 | int depth, void *data); | ||
| 888 | extern int early_init_dt_scan_recoverable_ranges(unsigned long node, | 889 | extern int early_init_dt_scan_recoverable_ranges(unsigned long node, |
| 889 | const char *uname, int depth, void *data); | 890 | const char *uname, int depth, void *data); |
| 890 | 891 | ||
| @@ -893,10 +894,6 @@ extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len); | |||
| 893 | 894 | ||
| 894 | extern void hvc_opal_init_early(void); | 895 | extern void hvc_opal_init_early(void); |
| 895 | 896 | ||
| 896 | /* Internal functions */ | ||
| 897 | extern int early_init_dt_scan_opal(unsigned long node, const char *uname, | ||
| 898 | int depth, void *data); | ||
| 899 | |||
| 900 | extern int opal_notifier_register(struct notifier_block *nb); | 897 | extern int opal_notifier_register(struct notifier_block *nb); |
| 901 | extern int opal_notifier_unregister(struct notifier_block *nb); | 898 | extern int opal_notifier_unregister(struct notifier_block *nb); |
| 902 | 899 | ||
| @@ -906,9 +903,6 @@ extern void opal_notifier_enable(void); | |||
| 906 | extern void opal_notifier_disable(void); | 903 | extern void opal_notifier_disable(void); |
| 907 | extern void opal_notifier_update_evt(uint64_t evt_mask, uint64_t evt_val); | 904 | extern void opal_notifier_update_evt(uint64_t evt_mask, uint64_t evt_val); |
| 908 | 905 | ||
| 909 | extern int opal_get_chars(uint32_t vtermno, char *buf, int count); | ||
| 910 | extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len); | ||
| 911 | |||
| 912 | extern int __opal_async_get_token(void); | 906 | extern int __opal_async_get_token(void); |
| 913 | extern int opal_async_get_token_interruptible(void); | 907 | extern int opal_async_get_token_interruptible(void); |
| 914 | extern int __opal_async_release_token(int token); | 908 | extern int __opal_async_release_token(int token); |
| @@ -916,8 +910,6 @@ extern int opal_async_release_token(int token); | |||
| 916 | extern int opal_async_wait_response(uint64_t token, struct opal_msg *msg); | 910 | extern int opal_async_wait_response(uint64_t token, struct opal_msg *msg); |
| 917 | extern int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data); | 911 | extern int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data); |
| 918 | 912 | ||
| 919 | extern void hvc_opal_init_early(void); | ||
| 920 | |||
| 921 | struct rtc_time; | 913 | struct rtc_time; |
| 922 | extern int opal_set_rtc_time(struct rtc_time *tm); | 914 | extern int opal_set_rtc_time(struct rtc_time *tm); |
| 923 | extern void opal_get_rtc_time(struct rtc_time *tm); | 915 | extern void opal_get_rtc_time(struct rtc_time *tm); |
| @@ -937,6 +929,10 @@ extern int opal_resync_timebase(void); | |||
| 937 | 929 | ||
| 938 | extern void opal_lpc_init(void); | 930 | extern void opal_lpc_init(void); |
| 939 | 931 | ||
| 932 | struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr, | ||
| 933 | unsigned long vmalloc_size); | ||
| 934 | void opal_free_sg_list(struct opal_sg_list *sg); | ||
| 935 | |||
| 940 | #endif /* __ASSEMBLY__ */ | 936 | #endif /* __ASSEMBLY__ */ |
| 941 | 937 | ||
| 942 | #endif /* __OPAL_H */ | 938 | #endif /* __OPAL_H */ |
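Declaring the scatter-gather fields as __be64 reflects that OPAL consumes the list as big-endian data regardless of the kernel's endianness, so a little-endian kernel must convert when building it. A sketch under that assumption, with an illustrative helper name:

    static void example_fill_sg_entry(struct opal_sg_entry *ent,
                                      void *buf, unsigned long len)
    {
            /* Firmware expects a big-endian physical address and length. */
            ent->data = cpu_to_be64(__pa(buf));
            ent->length = cpu_to_be64(len);
    }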
diff --git a/arch/powerpc/include/uapi/asm/setup.h b/arch/powerpc/include/uapi/asm/setup.h
index 552df83f1a49..ae3fb68cb28e 100644
--- a/arch/powerpc/include/uapi/asm/setup.h
+++ b/arch/powerpc/include/uapi/asm/setup.h
| @@ -1 +1,6 @@ | |||
| 1 | #include <asm-generic/setup.h> | 1 | #ifndef _UAPI_ASM_POWERPC_SETUP_H |
| 2 | #define _UAPI_ASM_POWERPC_SETUP_H | ||
| 3 | |||
| 4 | #define COMMAND_LINE_SIZE 2048 | ||
| 5 | |||
| 6 | #endif /* _UAPI_ASM_POWERPC_SETUP_H */ | ||
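COMMAND_LINE_SIZE sits in a uapi header because the value is visible to user space; for instance, a program that snapshots /proc/cmdline can size its buffer from it, as in this hypothetical sketch:

    #include <stdio.h>
    #include <asm/setup.h>          /* COMMAND_LINE_SIZE, now 2048 */

    int main(void)
    {
            char buf[COMMAND_LINE_SIZE];
            FILE *f = fopen("/proc/cmdline", "r");

            if (f && fgets(buf, sizeof(buf), f))
                    printf("%s", buf);
            if (f)
                    fclose(f);
            return 0;
    }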
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 3bd77edd7610..450850a49dce 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
| @@ -120,6 +120,7 @@ EXPORT_SYMBOL(giveup_spe); | |||
| 120 | EXPORT_SYMBOL(flush_instruction_cache); | 120 | EXPORT_SYMBOL(flush_instruction_cache); |
| 121 | #endif | 121 | #endif |
| 122 | EXPORT_SYMBOL(flush_dcache_range); | 122 | EXPORT_SYMBOL(flush_dcache_range); |
| 123 | EXPORT_SYMBOL(flush_icache_range); | ||
| 123 | 124 | ||
| 124 | #ifdef CONFIG_SMP | 125 | #ifdef CONFIG_SMP |
| 125 | #ifdef CONFIG_PPC32 | 126 | #ifdef CONFIG_PPC32 |
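Exporting flush_icache_range() lets modules that write instructions at runtime (JITs, trampoline generators and the like) make the new code coherent with the instruction cache before jumping to it. An illustrative caller:

    #include <linux/module.h>
    #include <asm/cacheflush.h>

    static void example_publish_code(void *code, unsigned long len)
    {
            /* Flush D-side and invalidate I-side over the patched range. */
            flush_icache_range((unsigned long)code,
                               (unsigned long)code + len);
    }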
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 2f3cdb01506d..658e89d2025b 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
| @@ -705,7 +705,7 @@ static int __init rtas_flash_init(void) | |||
| 705 | if (rtas_token("ibm,update-flash-64-and-reboot") == | 705 | if (rtas_token("ibm,update-flash-64-and-reboot") == |
| 706 | RTAS_UNKNOWN_SERVICE) { | 706 | RTAS_UNKNOWN_SERVICE) { |
| 707 | pr_info("rtas_flash: no firmware flash support\n"); | 707 | pr_info("rtas_flash: no firmware flash support\n"); |
| 708 | return 1; | 708 | return -EINVAL; |
| 709 | } | 709 | } |
| 710 | 710 | ||
| 711 | rtas_validate_flash_data.buf = kzalloc(VALIDATE_BUF_SIZE, GFP_KERNEL); | 711 | rtas_validate_flash_data.buf = kzalloc(VALIDATE_BUF_SIZE, GFP_KERNEL); |
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index ffbb871c2bd8..b031f932c0cc 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
| @@ -242,6 +242,12 @@ kvm_novcpu_exit: | |||
| 242 | */ | 242 | */ |
| 243 | .globl kvm_start_guest | 243 | .globl kvm_start_guest |
| 244 | kvm_start_guest: | 244 | kvm_start_guest: |
| 245 | |||
| 246 | /* Set runlatch bit the minute you wake up from nap */ | ||
| 247 | mfspr r1, SPRN_CTRLF | ||
| 248 | ori r1, r1, 1 | ||
| 249 | mtspr SPRN_CTRLT, r1 | ||
| 250 | |||
| 245 | ld r2,PACATOC(r13) | 251 | ld r2,PACATOC(r13) |
| 246 | 252 | ||
| 247 | li r0,KVM_HWTHREAD_IN_KVM | 253 | li r0,KVM_HWTHREAD_IN_KVM |
| @@ -309,6 +315,11 @@ kvm_no_guest: | |||
| 309 | li r0, KVM_HWTHREAD_IN_NAP | 315 | li r0, KVM_HWTHREAD_IN_NAP |
| 310 | stb r0, HSTATE_HWTHREAD_STATE(r13) | 316 | stb r0, HSTATE_HWTHREAD_STATE(r13) |
| 311 | kvm_do_nap: | 317 | kvm_do_nap: |
| 318 | /* Clear the runlatch bit before napping */ | ||
| 319 | mfspr r2, SPRN_CTRLF | ||
| 320 | clrrdi r2, r2, 1 | ||
| 321 | mtspr SPRN_CTRLT, r2 | ||
| 322 | |||
| 312 | li r3, LPCR_PECE0 | 323 | li r3, LPCR_PECE0 |
| 313 | mfspr r4, SPRN_LPCR | 324 | mfspr r4, SPRN_LPCR |
| 314 | rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1 | 325 | rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1 |
| @@ -1999,8 +2010,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) | |||
| 1999 | 2010 | ||
| 2000 | /* | 2011 | /* |
| 2001 | * Take a nap until a decrementer or external or doorbell interrupt | 2012 | * Take a nap until a decrementer or external or doorbell interrupt |
| 2002 | * occurs, with PECE1, PECE0 and PECEDP set in LPCR | 2013 | * occurs, with PECE1, PECE0 and PECEDP set in LPCR. Also clear the |
| 2014 | * runlatch bit before napping. | ||
| 2003 | */ | 2015 | */ |
| 2016 | mfspr r2, SPRN_CTRLF | ||
| 2017 | clrrdi r2, r2, 1 | ||
| 2018 | mtspr SPRN_CTRLT, r2 | ||
| 2019 | |||
| 2004 | li r0,1 | 2020 | li r0,1 |
| 2005 | stb r0,HSTATE_HWTHREAD_REQ(r13) | 2021 | stb r0,HSTATE_HWTHREAD_REQ(r13) |
| 2006 | mfspr r5,SPRN_LPCR | 2022 | mfspr r5,SPRN_LPCR |
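The runlatch bit is a hint to the core that the thread is doing useful work; it should be clear while napping and set again on wakeup, matching what the host idle path does. A rough C rendering of the SPR accesses added above (the real code has to stay in assembly, since it runs in the nap and guest entry/exit paths):

    #include <linux/types.h>
    #include <asm/reg.h>

    static inline void example_set_runlatch(bool on)
    {
            unsigned long ctrl = mfspr(SPRN_CTRLF);

            if (on)
                    ctrl |= CTRL_RUNLATCH;   /* ori  rN, rN, 1   */
            else
                    ctrl &= ~CTRL_RUNLATCH;  /* clrrdi rN, rN, 1 */
            mtspr(SPRN_CTRLT, ctrl);
    }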
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 3ea26c25590b..cf1d325eae8b 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
| @@ -82,17 +82,14 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize) | |||
| 82 | va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1); | 82 | va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1); |
| 83 | va |= penc << 12; | 83 | va |= penc << 12; |
| 84 | va |= ssize << 8; | 84 | va |= ssize << 8; |
| 85 | /* Add AVAL part */ | 85 | /* |
| 86 | if (psize != apsize) { | 86 | * AVAL bits: |
| 87 | /* | 87 | * We don't need all the bits, but rest of the bits |
| 88 | * MPSS, 64K base page size and 16MB parge page size | 88 | * must be ignored by the processor. |
| 89 | * We don't need all the bits, but rest of the bits | 89 | * vpn cover upto 65 bits of va. (0...65) and we need |
| 90 | * must be ignored by the processor. | 90 | * 58..64 bits of va. |
| 91 | * vpn cover upto 65 bits of va. (0...65) and we need | 91 | */ |
| 92 | * 58..64 bits of va. | 92 | va |= (vpn & 0xfe); /* AVAL */ |
| 93 | */ | ||
| 94 | va |= (vpn & 0xfe); | ||
| 95 | } | ||
| 96 | va |= 1; /* L */ | 93 | va |= 1; /* L */ |
| 97 | asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2) | 94 | asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2) |
| 98 | : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206) | 95 | : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206) |
| @@ -133,17 +130,14 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize) | |||
| 133 | va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1); | 130 | va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1); |
| 134 | va |= penc << 12; | 131 | va |= penc << 12; |
| 135 | va |= ssize << 8; | 132 | va |= ssize << 8; |
| 136 | /* Add AVAL part */ | 133 | /* |
| 137 | if (psize != apsize) { | 134 | * AVAL bits: |
| 138 | /* | 135 | * We don't need all the bits, but rest of the bits |
| 139 | * MPSS, 64K base page size and 16MB parge page size | 136 | * must be ignored by the processor. |
| 140 | * We don't need all the bits, but rest of the bits | 137 | * vpn cover upto 65 bits of va. (0...65) and we need |
| 141 | * must be ignored by the processor. | 138 | * 58..64 bits of va. |
| 142 | * vpn cover upto 65 bits of va. (0...65) and we need | 139 | */ |
| 143 | * 58..64 bits of va. | 140 | va |= (vpn & 0xfe); |
| 144 | */ | ||
| 145 | va |= (vpn & 0xfe); | ||
| 146 | } | ||
| 147 | va |= 1; /* L */ | 141 | va |= 1; /* L */ |
| 148 | asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)" | 142 | asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)" |
| 149 | : : "r"(va) : "memory"); | 143 | : : "r"(va) : "memory"); |
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index 297c91051413..e0766b82e165 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
| @@ -155,16 +155,28 @@ static ssize_t read_offset_data(void *dest, size_t dest_len, | |||
| 155 | return copy_len; | 155 | return copy_len; |
| 156 | } | 156 | } |
| 157 | 157 | ||
| 158 | static unsigned long h_get_24x7_catalog_page(char page[static 4096], | 158 | static unsigned long h_get_24x7_catalog_page_(unsigned long phys_4096, |
| 159 | u32 version, u32 index) | 159 | unsigned long version, |
| 160 | unsigned long index) | ||
| 160 | { | 161 | { |
| 161 | WARN_ON(!IS_ALIGNED((unsigned long)page, 4096)); | 162 | pr_devel("h_get_24x7_catalog_page(0x%lx, %lu, %lu)", |
| 163 | phys_4096, | ||
| 164 | version, | ||
| 165 | index); | ||
| 166 | WARN_ON(!IS_ALIGNED(phys_4096, 4096)); | ||
| 162 | return plpar_hcall_norets(H_GET_24X7_CATALOG_PAGE, | 167 | return plpar_hcall_norets(H_GET_24X7_CATALOG_PAGE, |
| 163 | virt_to_phys(page), | 168 | phys_4096, |
| 164 | version, | 169 | version, |
| 165 | index); | 170 | index); |
| 166 | } | 171 | } |
| 167 | 172 | ||
| 173 | static unsigned long h_get_24x7_catalog_page(char page[], | ||
| 174 | u64 version, u32 index) | ||
| 175 | { | ||
| 176 | return h_get_24x7_catalog_page_(virt_to_phys(page), | ||
| 177 | version, index); | ||
| 178 | } | ||
| 179 | |||
| 168 | static ssize_t catalog_read(struct file *filp, struct kobject *kobj, | 180 | static ssize_t catalog_read(struct file *filp, struct kobject *kobj, |
| 169 | struct bin_attribute *bin_attr, char *buf, | 181 | struct bin_attribute *bin_attr, char *buf, |
| 170 | loff_t offset, size_t count) | 182 | loff_t offset, size_t count) |
| @@ -173,7 +185,7 @@ static ssize_t catalog_read(struct file *filp, struct kobject *kobj, | |||
| 173 | ssize_t ret = 0; | 185 | ssize_t ret = 0; |
| 174 | size_t catalog_len = 0, catalog_page_len = 0, page_count = 0; | 186 | size_t catalog_len = 0, catalog_page_len = 0, page_count = 0; |
| 175 | loff_t page_offset = 0; | 187 | loff_t page_offset = 0; |
| 176 | uint32_t catalog_version_num = 0; | 188 | uint64_t catalog_version_num = 0; |
| 177 | void *page = kmem_cache_alloc(hv_page_cache, GFP_USER); | 189 | void *page = kmem_cache_alloc(hv_page_cache, GFP_USER); |
| 178 | struct hv_24x7_catalog_page_0 *page_0 = page; | 190 | struct hv_24x7_catalog_page_0 *page_0 = page; |
| 179 | if (!page) | 191 | if (!page) |
| @@ -185,7 +197,7 @@ static ssize_t catalog_read(struct file *filp, struct kobject *kobj, | |||
| 185 | goto e_free; | 197 | goto e_free; |
| 186 | } | 198 | } |
| 187 | 199 | ||
| 188 | catalog_version_num = be32_to_cpu(page_0->version); | 200 | catalog_version_num = be64_to_cpu(page_0->version); |
| 189 | catalog_page_len = be32_to_cpu(page_0->length); | 201 | catalog_page_len = be32_to_cpu(page_0->length); |
| 190 | catalog_len = catalog_page_len * 4096; | 202 | catalog_len = catalog_page_len * 4096; |
| 191 | 203 | ||
| @@ -208,8 +220,9 @@ static ssize_t catalog_read(struct file *filp, struct kobject *kobj, | |||
| 208 | page, 4096, page_offset * 4096); | 220 | page, 4096, page_offset * 4096); |
| 209 | e_free: | 221 | e_free: |
| 210 | if (hret) | 222 | if (hret) |
| 211 | pr_err("h_get_24x7_catalog_page(ver=%d, page=%lld) failed: rc=%ld\n", | 223 | pr_err("h_get_24x7_catalog_page(ver=%lld, page=%lld) failed:" |
| 212 | catalog_version_num, page_offset, hret); | 224 | " rc=%ld\n", |
| 225 | catalog_version_num, page_offset, hret); | ||
| 213 | kfree(page); | 226 | kfree(page); |
| 214 | 227 | ||
| 215 | pr_devel("catalog_read: offset=%lld(%lld) count=%zu(%zu) catalog_len=%zu(%zu) => %zd\n", | 228 | pr_devel("catalog_read: offset=%lld(%lld) count=%zu(%zu) catalog_len=%zu(%zu) => %zd\n", |
| @@ -243,7 +256,7 @@ e_free: \ | |||
| 243 | static DEVICE_ATTR_RO(_name) | 256 | static DEVICE_ATTR_RO(_name) |
| 244 | 257 | ||
| 245 | PAGE_0_ATTR(catalog_version, "%lld\n", | 258 | PAGE_0_ATTR(catalog_version, "%lld\n", |
| 246 | (unsigned long long)be32_to_cpu(page_0->version)); | 259 | (unsigned long long)be64_to_cpu(page_0->version)); |
| 247 | PAGE_0_ATTR(catalog_len, "%lld\n", | 260 | PAGE_0_ATTR(catalog_len, "%lld\n", |
| 248 | (unsigned long long)be32_to_cpu(page_0->length) * 4096); | 261 | (unsigned long long)be32_to_cpu(page_0->length) * 4096); |
| 249 | static BIN_ATTR_RO(catalog, 0/* real length varies */); | 262 | static BIN_ATTR_RO(catalog, 0/* real length varies */); |
| @@ -485,13 +498,13 @@ static int hv_24x7_init(void) | |||
| 485 | struct hv_perf_caps caps; | 498 | struct hv_perf_caps caps; |
| 486 | 499 | ||
| 487 | if (!firmware_has_feature(FW_FEATURE_LPAR)) { | 500 | if (!firmware_has_feature(FW_FEATURE_LPAR)) { |
| 488 | pr_info("not a virtualized system, not enabling\n"); | 501 | pr_debug("not a virtualized system, not enabling\n"); |
| 489 | return -ENODEV; | 502 | return -ENODEV; |
| 490 | } | 503 | } |
| 491 | 504 | ||
| 492 | hret = hv_perf_caps_get(&caps); | 505 | hret = hv_perf_caps_get(&caps); |
| 493 | if (hret) { | 506 | if (hret) { |
| 494 | pr_info("could not obtain capabilities, error 0x%80lx, not enabling\n", | 507 | pr_debug("could not obtain capabilities, not enabling, rc=%ld\n", |
| 495 | hret); | 508 | hret); |
| 496 | return -ENODEV; | 509 | return -ENODEV; |
| 497 | } | 510 | } |
diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c
index 278ba7b9c2b5..c9d399a2df82 100644
--- a/arch/powerpc/perf/hv-gpci.c
+++ b/arch/powerpc/perf/hv-gpci.c
| @@ -78,7 +78,7 @@ static ssize_t kernel_version_show(struct device *dev, | |||
| 78 | return sprintf(page, "0x%x\n", COUNTER_INFO_VERSION_CURRENT); | 78 | return sprintf(page, "0x%x\n", COUNTER_INFO_VERSION_CURRENT); |
| 79 | } | 79 | } |
| 80 | 80 | ||
| 81 | DEVICE_ATTR_RO(kernel_version); | 81 | static DEVICE_ATTR_RO(kernel_version); |
| 82 | HV_CAPS_ATTR(version, "0x%x\n"); | 82 | HV_CAPS_ATTR(version, "0x%x\n"); |
| 83 | HV_CAPS_ATTR(ga, "%d\n"); | 83 | HV_CAPS_ATTR(ga, "%d\n"); |
| 84 | HV_CAPS_ATTR(expanded, "%d\n"); | 84 | HV_CAPS_ATTR(expanded, "%d\n"); |
| @@ -273,13 +273,13 @@ static int hv_gpci_init(void) | |||
| 273 | struct hv_perf_caps caps; | 273 | struct hv_perf_caps caps; |
| 274 | 274 | ||
| 275 | if (!firmware_has_feature(FW_FEATURE_LPAR)) { | 275 | if (!firmware_has_feature(FW_FEATURE_LPAR)) { |
| 276 | pr_info("not a virtualized system, not enabling\n"); | 276 | pr_debug("not a virtualized system, not enabling\n"); |
| 277 | return -ENODEV; | 277 | return -ENODEV; |
| 278 | } | 278 | } |
| 279 | 279 | ||
| 280 | hret = hv_perf_caps_get(&caps); | 280 | hret = hv_perf_caps_get(&caps); |
| 281 | if (hret) { | 281 | if (hret) { |
| 282 | pr_info("could not obtain capabilities, error 0x%80lx, not enabling\n", | 282 | pr_debug("could not obtain capabilities, not enabling, rc=%ld\n", |
| 283 | hret); | 283 | hret); |
| 284 | return -ENODEV; | 284 | return -ENODEV; |
| 285 | } | 285 | } |
diff --git a/arch/powerpc/platforms/powernv/opal-dump.c b/arch/powerpc/platforms/powernv/opal-dump.c
index b9827b0d87e4..788a1977b9a5 100644
--- a/arch/powerpc/platforms/powernv/opal-dump.c
+++ b/arch/powerpc/platforms/powernv/opal-dump.c
| @@ -209,89 +209,20 @@ static struct kobj_type dump_ktype = { | |||
| 209 | .default_attrs = dump_default_attrs, | 209 | .default_attrs = dump_default_attrs, |
| 210 | }; | 210 | }; |
| 211 | 211 | ||
| 212 | static void free_dump_sg_list(struct opal_sg_list *list) | 212 | static int64_t dump_read_info(uint32_t *dump_id, uint32_t *dump_size, uint32_t *dump_type) |
| 213 | { | ||
| 214 | struct opal_sg_list *sg1; | ||
| 215 | while (list) { | ||
| 216 | sg1 = list->next; | ||
| 217 | kfree(list); | ||
| 218 | list = sg1; | ||
| 219 | } | ||
| 220 | list = NULL; | ||
| 221 | } | ||
| 222 | |||
| 223 | static struct opal_sg_list *dump_data_to_sglist(struct dump_obj *dump) | ||
| 224 | { | ||
| 225 | struct opal_sg_list *sg1, *list = NULL; | ||
| 226 | void *addr; | ||
| 227 | int64_t size; | ||
| 228 | |||
| 229 | addr = dump->buffer; | ||
| 230 | size = dump->size; | ||
| 231 | |||
| 232 | sg1 = kzalloc(PAGE_SIZE, GFP_KERNEL); | ||
| 233 | if (!sg1) | ||
| 234 | goto nomem; | ||
| 235 | |||
| 236 | list = sg1; | ||
| 237 | sg1->num_entries = 0; | ||
| 238 | while (size > 0) { | ||
| 239 | /* Translate virtual address to physical address */ | ||
| 240 | sg1->entry[sg1->num_entries].data = | ||
| 241 | (void *)(vmalloc_to_pfn(addr) << PAGE_SHIFT); | ||
| 242 | |||
| 243 | if (size > PAGE_SIZE) | ||
| 244 | sg1->entry[sg1->num_entries].length = PAGE_SIZE; | ||
| 245 | else | ||
| 246 | sg1->entry[sg1->num_entries].length = size; | ||
| 247 | |||
| 248 | sg1->num_entries++; | ||
| 249 | if (sg1->num_entries >= SG_ENTRIES_PER_NODE) { | ||
| 250 | sg1->next = kzalloc(PAGE_SIZE, GFP_KERNEL); | ||
| 251 | if (!sg1->next) | ||
| 252 | goto nomem; | ||
| 253 | |||
| 254 | sg1 = sg1->next; | ||
| 255 | sg1->num_entries = 0; | ||
| 256 | } | ||
| 257 | addr += PAGE_SIZE; | ||
| 258 | size -= PAGE_SIZE; | ||
| 259 | } | ||
| 260 | return list; | ||
| 261 | |||
| 262 | nomem: | ||
| 263 | pr_err("%s : Failed to allocate memory\n", __func__); | ||
| 264 | free_dump_sg_list(list); | ||
| 265 | return NULL; | ||
| 266 | } | ||
| 267 | |||
| 268 | static void sglist_to_phy_addr(struct opal_sg_list *list) | ||
| 269 | { | ||
| 270 | struct opal_sg_list *sg, *next; | ||
| 271 | |||
| 272 | for (sg = list; sg; sg = next) { | ||
| 273 | next = sg->next; | ||
| 274 | /* Don't translate NULL pointer for last entry */ | ||
| 275 | if (sg->next) | ||
| 276 | sg->next = (struct opal_sg_list *)__pa(sg->next); | ||
| 277 | else | ||
| 278 | sg->next = NULL; | ||
| 279 | |||
| 280 | /* Convert num_entries to length */ | ||
| 281 | sg->num_entries = | ||
| 282 | sg->num_entries * sizeof(struct opal_sg_entry) + 16; | ||
| 283 | } | ||
| 284 | } | ||
| 285 | |||
| 286 | static int64_t dump_read_info(uint32_t *id, uint32_t *size, uint32_t *type) | ||
| 287 | { | 213 | { |
| 214 | __be32 id, size, type; | ||
| 288 | int rc; | 215 | int rc; |
| 289 | *type = 0xffffffff; | ||
| 290 | 216 | ||
| 291 | rc = opal_dump_info2(id, size, type); | 217 | type = cpu_to_be32(0xffffffff); |
| 292 | 218 | ||
| 219 | rc = opal_dump_info2(&id, &size, &type); | ||
| 293 | if (rc == OPAL_PARAMETER) | 220 | if (rc == OPAL_PARAMETER) |
| 294 | rc = opal_dump_info(id, size); | 221 | rc = opal_dump_info(&id, &size); |
| 222 | |||
| 223 | *dump_id = be32_to_cpu(id); | ||
| 224 | *dump_size = be32_to_cpu(size); | ||
| 225 | *dump_type = be32_to_cpu(type); | ||
| 295 | 226 | ||
| 296 | if (rc) | 227 | if (rc) |
| 297 | pr_warn("%s: Failed to get dump info (%d)\n", | 228 | pr_warn("%s: Failed to get dump info (%d)\n", |
| @@ -314,15 +245,12 @@ static int64_t dump_read_data(struct dump_obj *dump) | |||
| 314 | } | 245 | } |
| 315 | 246 | ||
| 316 | /* Generate SG list */ | 247 | /* Generate SG list */ |
| 317 | list = dump_data_to_sglist(dump); | 248 | list = opal_vmalloc_to_sg_list(dump->buffer, dump->size); |
| 318 | if (!list) { | 249 | if (!list) { |
| 319 | rc = -ENOMEM; | 250 | rc = -ENOMEM; |
| 320 | goto out; | 251 | goto out; |
| 321 | } | 252 | } |
| 322 | 253 | ||
| 323 | /* Translate sg list addr to real address */ | ||
| 324 | sglist_to_phy_addr(list); | ||
| 325 | |||
| 326 | /* First entry address */ | 254 | /* First entry address */ |
| 327 | addr = __pa(list); | 255 | addr = __pa(list); |
| 328 | 256 | ||
| @@ -341,7 +269,7 @@ static int64_t dump_read_data(struct dump_obj *dump) | |||
| 341 | __func__, dump->id); | 269 | __func__, dump->id); |
| 342 | 270 | ||
| 343 | /* Free SG list */ | 271 | /* Free SG list */ |
| 344 | free_dump_sg_list(list); | 272 | opal_free_sg_list(list); |
| 345 | 273 | ||
| 346 | out: | 274 | out: |
| 347 | return rc; | 275 | return rc; |
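With the shared helpers, building and tearing down the firmware-visible SG chain becomes a pair of calls around the dump read. A hedged sketch of the resulting pattern (names as declared in opal.h earlier in this patch, error handling trimmed):

    static int64_t example_send_dump(struct dump_obj *dump)
    {
            struct opal_sg_list *list;
            int64_t rc;

            list = opal_vmalloc_to_sg_list(dump->buffer, dump->size);
            if (!list)
                    return -ENOMEM;

            /* Firmware takes the physical address of the first list node. */
            rc = opal_dump_read(dump->id, __pa(list));
            opal_free_sg_list(list);
            return rc;
    }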
diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c
index ef7bc2a97862..10268c41d830 100644
--- a/arch/powerpc/platforms/powernv/opal-elog.c
+++ b/arch/powerpc/platforms/powernv/opal-elog.c
| @@ -238,18 +238,25 @@ static struct elog_obj *create_elog_obj(uint64_t id, size_t size, uint64_t type) | |||
| 238 | 238 | ||
| 239 | static void elog_work_fn(struct work_struct *work) | 239 | static void elog_work_fn(struct work_struct *work) |
| 240 | { | 240 | { |
| 241 | size_t elog_size; | 241 | __be64 size; |
| 242 | __be64 id; | ||
| 243 | __be64 type; | ||
| 244 | uint64_t elog_size; | ||
| 242 | uint64_t log_id; | 245 | uint64_t log_id; |
| 243 | uint64_t elog_type; | 246 | uint64_t elog_type; |
| 244 | int rc; | 247 | int rc; |
| 245 | char name[2+16+1]; | 248 | char name[2+16+1]; |
| 246 | 249 | ||
| 247 | rc = opal_get_elog_size(&log_id, &elog_size, &elog_type); | 250 | rc = opal_get_elog_size(&id, &size, &type); |
| 248 | if (rc != OPAL_SUCCESS) { | 251 | if (rc != OPAL_SUCCESS) { |
| 249 | pr_err("ELOG: Opal log read failed\n"); | 252 | pr_err("ELOG: Opal log read failed\n"); |
| 250 | return; | 253 | return; |
| 251 | } | 254 | } |
| 252 | 255 | ||
| 256 | elog_size = be64_to_cpu(size); | ||
| 257 | log_id = be64_to_cpu(id); | ||
| 258 | elog_type = be64_to_cpu(type); | ||
| 259 | |||
| 253 | BUG_ON(elog_size > OPAL_MAX_ERRLOG_SIZE); | 260 | BUG_ON(elog_size > OPAL_MAX_ERRLOG_SIZE); |
| 254 | 261 | ||
| 255 | if (elog_size >= OPAL_MAX_ERRLOG_SIZE) | 262 | if (elog_size >= OPAL_MAX_ERRLOG_SIZE) |
diff --git a/arch/powerpc/platforms/powernv/opal-flash.c b/arch/powerpc/platforms/powernv/opal-flash.c index 714ef972406b..dc487ff04704 100644 --- a/arch/powerpc/platforms/powernv/opal-flash.c +++ b/arch/powerpc/platforms/powernv/opal-flash.c | |||
| @@ -79,9 +79,6 @@ | |||
| 79 | /* XXX: Assume candidate image size is <= 1GB */ | 79 | /* XXX: Assume candidate image size is <= 1GB */ |
| 80 | #define MAX_IMAGE_SIZE 0x40000000 | 80 | #define MAX_IMAGE_SIZE 0x40000000 |
| 81 | 81 | ||
| 82 | /* Flash sg list version */ | ||
| 83 | #define SG_LIST_VERSION (1UL) | ||
| 84 | |||
| 85 | /* Image status */ | 82 | /* Image status */ |
| 86 | enum { | 83 | enum { |
| 87 | IMAGE_INVALID, | 84 | IMAGE_INVALID, |
| @@ -131,11 +128,15 @@ static DEFINE_MUTEX(image_data_mutex); | |||
| 131 | */ | 128 | */ |
| 132 | static inline void opal_flash_validate(void) | 129 | static inline void opal_flash_validate(void) |
| 133 | { | 130 | { |
| 134 | struct validate_flash_t *args_buf = &validate_flash_data; | 131 | long ret; |
| 132 | void *buf = validate_flash_data.buf; | ||
| 133 | __be32 size, result; | ||
| 135 | 134 | ||
| 136 | args_buf->status = opal_validate_flash(__pa(args_buf->buf), | 135 | ret = opal_validate_flash(__pa(buf), &size, &result); |
| 137 | &(args_buf->buf_size), | 136 | |
| 138 | &(args_buf->result)); | 137 | validate_flash_data.status = ret; |
| 138 | validate_flash_data.buf_size = be32_to_cpu(size); | ||
| 139 | validate_flash_data.result = be32_to_cpu(result); | ||
| 139 | } | 140 | } |
| 140 | 141 | ||
| 141 | /* | 142 | /* |
| @@ -268,93 +269,11 @@ static ssize_t manage_store(struct kobject *kobj, | |||
| 268 | } | 269 | } |
| 269 | 270 | ||
| 270 | /* | 271 | /* |
| 271 | * Free sg list | ||
| 272 | */ | ||
| 273 | static void free_sg_list(struct opal_sg_list *list) | ||
| 274 | { | ||
| 275 | struct opal_sg_list *sg1; | ||
| 276 | while (list) { | ||
| 277 | sg1 = list->next; | ||
| 278 | kfree(list); | ||
| 279 | list = sg1; | ||
| 280 | } | ||
| 281 | list = NULL; | ||
| 282 | } | ||
| 283 | |||
| 284 | /* | ||
| 285 | * Build candidate image scatter gather list | ||
| 286 | * | ||
| 287 | * list format: | ||
| 288 | * ----------------------------------- | ||
| 289 | * | VER (8) | Entry length in bytes | | ||
| 290 | * ----------------------------------- | ||
| 291 | * | Pointer to next entry | | ||
| 292 | * ----------------------------------- | ||
| 293 | * | Address of memory area 1 | | ||
| 294 | * ----------------------------------- | ||
| 295 | * | Length of memory area 1 | | ||
| 296 | * ----------------------------------- | ||
| 297 | * | ......... | | ||
| 298 | * ----------------------------------- | ||
| 299 | * | ......... | | ||
| 300 | * ----------------------------------- | ||
| 301 | * | Address of memory area N | | ||
| 302 | * ----------------------------------- | ||
| 303 | * | Length of memory area N | | ||
| 304 | * ----------------------------------- | ||
| 305 | */ | ||
| 306 | static struct opal_sg_list *image_data_to_sglist(void) | ||
| 307 | { | ||
| 308 | struct opal_sg_list *sg1, *list = NULL; | ||
| 309 | void *addr; | ||
| 310 | int size; | ||
| 311 | |||
| 312 | addr = image_data.data; | ||
| 313 | size = image_data.size; | ||
| 314 | |||
| 315 | sg1 = kzalloc(PAGE_SIZE, GFP_KERNEL); | ||
| 316 | if (!sg1) | ||
| 317 | return NULL; | ||
| 318 | |||
| 319 | list = sg1; | ||
| 320 | sg1->num_entries = 0; | ||
| 321 | while (size > 0) { | ||
| 322 | /* Translate virtual address to physical address */ | ||
| 323 | sg1->entry[sg1->num_entries].data = | ||
| 324 | (void *)(vmalloc_to_pfn(addr) << PAGE_SHIFT); | ||
| 325 | |||
| 326 | if (size > PAGE_SIZE) | ||
| 327 | sg1->entry[sg1->num_entries].length = PAGE_SIZE; | ||
| 328 | else | ||
| 329 | sg1->entry[sg1->num_entries].length = size; | ||
| 330 | |||
| 331 | sg1->num_entries++; | ||
| 332 | if (sg1->num_entries >= SG_ENTRIES_PER_NODE) { | ||
| 333 | sg1->next = kzalloc(PAGE_SIZE, GFP_KERNEL); | ||
| 334 | if (!sg1->next) { | ||
| 335 | pr_err("%s : Failed to allocate memory\n", | ||
| 336 | __func__); | ||
| 337 | goto nomem; | ||
| 338 | } | ||
| 339 | |||
| 340 | sg1 = sg1->next; | ||
| 341 | sg1->num_entries = 0; | ||
| 342 | } | ||
| 343 | addr += PAGE_SIZE; | ||
| 344 | size -= PAGE_SIZE; | ||
| 345 | } | ||
| 346 | return list; | ||
| 347 | nomem: | ||
| 348 | free_sg_list(list); | ||
| 349 | return NULL; | ||
| 350 | } | ||
| 351 | |||
| 352 | /* | ||
| 353 | * OPAL update flash | 272 | * OPAL update flash |
| 354 | */ | 273 | */ |
| 355 | static int opal_flash_update(int op) | 274 | static int opal_flash_update(int op) |
| 356 | { | 275 | { |
| 357 | struct opal_sg_list *sg, *list, *next; | 276 | struct opal_sg_list *list; |
| 358 | unsigned long addr; | 277 | unsigned long addr; |
| 359 | int64_t rc = OPAL_PARAMETER; | 278 | int64_t rc = OPAL_PARAMETER; |
| 360 | 279 | ||
| @@ -364,30 +283,13 @@ static int opal_flash_update(int op) | |||
| 364 | goto flash; | 283 | goto flash; |
| 365 | } | 284 | } |
| 366 | 285 | ||
| 367 | list = image_data_to_sglist(); | 286 | list = opal_vmalloc_to_sg_list(image_data.data, image_data.size); |
| 368 | if (!list) | 287 | if (!list) |
| 369 | goto invalid_img; | 288 | goto invalid_img; |
| 370 | 289 | ||
| 371 | /* First entry address */ | 290 | /* First entry address */ |
| 372 | addr = __pa(list); | 291 | addr = __pa(list); |
| 373 | 292 | ||
| 374 | /* Translate sg list address to absolute */ | ||
| 375 | for (sg = list; sg; sg = next) { | ||
| 376 | next = sg->next; | ||
| 377 | /* Don't translate NULL pointer for last entry */ | ||
| 378 | if (sg->next) | ||
| 379 | sg->next = (struct opal_sg_list *)__pa(sg->next); | ||
| 380 | else | ||
| 381 | sg->next = NULL; | ||
| 382 | |||
| 383 | /* | ||
| 384 | * Convert num_entries to version/length format | ||
| 385 | * to satisfy OPAL. | ||
| 386 | */ | ||
| 387 | sg->num_entries = (SG_LIST_VERSION << 56) | | ||
| 388 | (sg->num_entries * sizeof(struct opal_sg_entry) + 16); | ||
| 389 | } | ||
| 390 | |||
| 391 | pr_alert("FLASH: Image is %u bytes\n", image_data.size); | 293 | pr_alert("FLASH: Image is %u bytes\n", image_data.size); |
| 392 | pr_alert("FLASH: Image update requested\n"); | 294 | pr_alert("FLASH: Image update requested\n"); |
| 393 | pr_alert("FLASH: Image will be updated during system reboot\n"); | 295 | pr_alert("FLASH: Image will be updated during system reboot\n"); |
diff --git a/arch/powerpc/platforms/powernv/opal-sysparam.c b/arch/powerpc/platforms/powernv/opal-sysparam.c index 6b614726baf2..d202f9bc3683 100644 --- a/arch/powerpc/platforms/powernv/opal-sysparam.c +++ b/arch/powerpc/platforms/powernv/opal-sysparam.c | |||
| @@ -39,10 +39,11 @@ struct param_attr { | |||
| 39 | struct kobj_attribute kobj_attr; | 39 | struct kobj_attribute kobj_attr; |
| 40 | }; | 40 | }; |
| 41 | 41 | ||
| 42 | static int opal_get_sys_param(u32 param_id, u32 length, void *buffer) | 42 | static ssize_t opal_get_sys_param(u32 param_id, u32 length, void *buffer) |
| 43 | { | 43 | { |
| 44 | struct opal_msg msg; | 44 | struct opal_msg msg; |
| 45 | int ret, token; | 45 | ssize_t ret; |
| 46 | int token; | ||
| 46 | 47 | ||
| 47 | token = opal_async_get_token_interruptible(); | 48 | token = opal_async_get_token_interruptible(); |
| 48 | if (token < 0) { | 49 | if (token < 0) { |
| @@ -59,7 +60,7 @@ static int opal_get_sys_param(u32 param_id, u32 length, void *buffer) | |||
| 59 | 60 | ||
| 60 | ret = opal_async_wait_response(token, &msg); | 61 | ret = opal_async_wait_response(token, &msg); |
| 61 | if (ret) { | 62 | if (ret) { |
| 62 | pr_err("%s: Failed to wait for the async response, %d\n", | 63 | pr_err("%s: Failed to wait for the async response, %zd\n", |
| 63 | __func__, ret); | 64 | __func__, ret); |
| 64 | goto out_token; | 65 | goto out_token; |
| 65 | } | 66 | } |
| @@ -111,7 +112,7 @@ static ssize_t sys_param_show(struct kobject *kobj, | |||
| 111 | { | 112 | { |
| 112 | struct param_attr *attr = container_of(kobj_attr, struct param_attr, | 113 | struct param_attr *attr = container_of(kobj_attr, struct param_attr, |
| 113 | kobj_attr); | 114 | kobj_attr); |
| 114 | int ret; | 115 | ssize_t ret; |
| 115 | 116 | ||
| 116 | mutex_lock(&opal_sysparam_mutex); | 117 | mutex_lock(&opal_sysparam_mutex); |
| 117 | ret = opal_get_sys_param(attr->param_id, attr->param_size, | 118 | ret = opal_get_sys_param(attr->param_id, attr->param_size, |
| @@ -121,9 +122,10 @@ static ssize_t sys_param_show(struct kobject *kobj, | |||
| 121 | 122 | ||
| 122 | memcpy(buf, param_data_buf, attr->param_size); | 123 | memcpy(buf, param_data_buf, attr->param_size); |
| 123 | 124 | ||
| 125 | ret = attr->param_size; | ||
| 124 | out: | 126 | out: |
| 125 | mutex_unlock(&opal_sysparam_mutex); | 127 | mutex_unlock(&opal_sysparam_mutex); |
| 126 | return ret ? ret : attr->param_size; | 128 | return ret; |
| 127 | } | 129 | } |
| 128 | 130 | ||
| 129 | static ssize_t sys_param_store(struct kobject *kobj, | 131 | static ssize_t sys_param_store(struct kobject *kobj, |
| @@ -131,14 +133,20 @@ static ssize_t sys_param_store(struct kobject *kobj, | |||
| 131 | { | 133 | { |
| 132 | struct param_attr *attr = container_of(kobj_attr, struct param_attr, | 134 | struct param_attr *attr = container_of(kobj_attr, struct param_attr, |
| 133 | kobj_attr); | 135 | kobj_attr); |
| 134 | int ret; | 136 | ssize_t ret; |
| 137 | |||
| 138 | /* MAX_PARAM_DATA_LEN is sizeof(param_data_buf) */ | ||
| 139 | if (count > MAX_PARAM_DATA_LEN) | ||
| 140 | count = MAX_PARAM_DATA_LEN; | ||
| 135 | 141 | ||
| 136 | mutex_lock(&opal_sysparam_mutex); | 142 | mutex_lock(&opal_sysparam_mutex); |
| 137 | memcpy(param_data_buf, buf, count); | 143 | memcpy(param_data_buf, buf, count); |
| 138 | ret = opal_set_sys_param(attr->param_id, attr->param_size, | 144 | ret = opal_set_sys_param(attr->param_id, attr->param_size, |
| 139 | param_data_buf); | 145 | param_data_buf); |
| 140 | mutex_unlock(&opal_sysparam_mutex); | 146 | mutex_unlock(&opal_sysparam_mutex); |
| 141 | return ret ? ret : count; | 147 | if (!ret) |
| 148 | ret = count; | ||
| 149 | return ret; | ||
| 142 | } | 150 | } |
| 143 | 151 | ||
| 144 | void __init opal_sys_param_init(void) | 152 | void __init opal_sys_param_init(void) |
| @@ -214,13 +222,13 @@ void __init opal_sys_param_init(void) | |||
| 214 | } | 222 | } |
| 215 | 223 | ||
| 216 | if (of_property_read_u32_array(sysparam, "param-len", size, count)) { | 224 | if (of_property_read_u32_array(sysparam, "param-len", size, count)) { |
| 217 | pr_err("SYSPARAM: Missing propery param-len in the DT\n"); | 225 | pr_err("SYSPARAM: Missing property param-len in the DT\n"); |
| 218 | goto out_free_perm; | 226 | goto out_free_perm; |
| 219 | } | 227 | } |
| 220 | 228 | ||
| 221 | 229 | ||
| 222 | if (of_property_read_u8_array(sysparam, "param-perm", perm, count)) { | 230 | if (of_property_read_u8_array(sysparam, "param-perm", perm, count)) { |
| 223 | pr_err("SYSPARAM: Missing propery param-perm in the DT\n"); | 231 | pr_err("SYSPARAM: Missing property param-perm in the DT\n"); |
| 224 | goto out_free_perm; | 232 | goto out_free_perm; |
| 225 | } | 233 | } |
| 226 | 234 | ||
| @@ -233,6 +241,12 @@ void __init opal_sys_param_init(void) | |||
| 233 | 241 | ||
| 234 | /* For each of the parameters, populate the parameter attributes */ | 242 | /* For each of the parameters, populate the parameter attributes */ |
| 235 | for (i = 0; i < count; i++) { | 243 | for (i = 0; i < count; i++) { |
| 244 | if (size[i] > MAX_PARAM_DATA_LEN) { | ||
| 245 | pr_warn("SYSPARAM: Not creating parameter %d as size " | ||
| 246 | "exceeds buffer length\n", i); | ||
| 247 | continue; | ||
| 248 | } | ||
| 249 | |||
| 236 | sysfs_attr_init(&attr[i].kobj_attr.attr); | 250 | sysfs_attr_init(&attr[i].kobj_attr.attr); |
| 237 | attr[i].param_id = id[i]; | 251 | attr[i].param_id = id[i]; |
| 238 | attr[i].param_size = size[i]; | 252 | attr[i].param_size = size[i]; |
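
The sysparam store path above bounds the user-supplied count by the size of the shared staging buffer before copying, and returns an ssize_t so errors propagate cleanly. A minimal sketch of that idiom, assuming a fixed-size buffer; BUF_LEN and write_param() are made-up stand-ins for MAX_PARAM_DATA_LEN and the OPAL setter:

    #define BUF_LEN 64                              /* stand-in for MAX_PARAM_DATA_LEN */

    static char staging[BUF_LEN];

    ssize_t write_param(const char *data, size_t len);  /* hypothetical backend call */

    static ssize_t foo_store(struct kobject *kobj, struct kobj_attribute *attr,
                             const char *buf, size_t count)
    {
            ssize_t ret;

            /* never copy more than the staging buffer can hold */
            if (count > BUF_LEN)
                    count = BUF_LEN;

            memcpy(staging, buf, count);
            ret = write_param(staging, count);

            /* sysfs store returns bytes consumed on success, negative errno on error */
            return ret ? ret : count;
    }
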
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index 49d2f00019e5..360ad80c754c 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c | |||
| @@ -242,14 +242,14 @@ void opal_notifier_update_evt(uint64_t evt_mask, | |||
| 242 | void opal_notifier_enable(void) | 242 | void opal_notifier_enable(void) |
| 243 | { | 243 | { |
| 244 | int64_t rc; | 244 | int64_t rc; |
| 245 | uint64_t evt = 0; | 245 | __be64 evt = 0; |
| 246 | 246 | ||
| 247 | atomic_set(&opal_notifier_hold, 0); | 247 | atomic_set(&opal_notifier_hold, 0); |
| 248 | 248 | ||
| 249 | /* Process pending events */ | 249 | /* Process pending events */ |
| 250 | rc = opal_poll_events(&evt); | 250 | rc = opal_poll_events(&evt); |
| 251 | if (rc == OPAL_SUCCESS && evt) | 251 | if (rc == OPAL_SUCCESS && evt) |
| 252 | opal_do_notifier(evt); | 252 | opal_do_notifier(be64_to_cpu(evt)); |
| 253 | } | 253 | } |
| 254 | 254 | ||
| 255 | void opal_notifier_disable(void) | 255 | void opal_notifier_disable(void) |
| @@ -529,7 +529,7 @@ static irqreturn_t opal_interrupt(int irq, void *data) | |||
| 529 | 529 | ||
| 530 | opal_handle_interrupt(virq_to_hw(irq), &events); | 530 | opal_handle_interrupt(virq_to_hw(irq), &events); |
| 531 | 531 | ||
| 532 | opal_do_notifier(events); | 532 | opal_do_notifier(be64_to_cpu(events)); |
| 533 | 533 | ||
| 534 | return IRQ_HANDLED; | 534 | return IRQ_HANDLED; |
| 535 | } | 535 | } |
| @@ -638,3 +638,66 @@ void opal_shutdown(void) | |||
| 638 | 638 | ||
| 639 | /* Export this so that test modules can use it */ | 639 | /* Export this so that test modules can use it */ |
| 640 | EXPORT_SYMBOL_GPL(opal_invalid_call); | 640 | EXPORT_SYMBOL_GPL(opal_invalid_call); |
| 641 | |||
| 642 | /* Convert a region of vmalloc memory to an opal sg list */ | ||
| 643 | struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr, | ||
| 644 | unsigned long vmalloc_size) | ||
| 645 | { | ||
| 646 | struct opal_sg_list *sg, *first = NULL; | ||
| 647 | unsigned long i = 0; | ||
| 648 | |||
| 649 | sg = kzalloc(PAGE_SIZE, GFP_KERNEL); | ||
| 650 | if (!sg) | ||
| 651 | goto nomem; | ||
| 652 | |||
| 653 | first = sg; | ||
| 654 | |||
| 655 | while (vmalloc_size > 0) { | ||
| 656 | uint64_t data = vmalloc_to_pfn(vmalloc_addr) << PAGE_SHIFT; | ||
| 657 | uint64_t length = min(vmalloc_size, PAGE_SIZE); | ||
| 658 | |||
| 659 | sg->entry[i].data = cpu_to_be64(data); | ||
| 660 | sg->entry[i].length = cpu_to_be64(length); | ||
| 661 | i++; | ||
| 662 | |||
| 663 | if (i >= SG_ENTRIES_PER_NODE) { | ||
| 664 | struct opal_sg_list *next; | ||
| 665 | |||
| 666 | next = kzalloc(PAGE_SIZE, GFP_KERNEL); | ||
| 667 | if (!next) | ||
| 668 | goto nomem; | ||
| 669 | |||
| 670 | sg->length = cpu_to_be64( | ||
| 671 | i * sizeof(struct opal_sg_entry) + 16); | ||
| 672 | i = 0; | ||
| 673 | sg->next = cpu_to_be64(__pa(next)); | ||
| 674 | sg = next; | ||
| 675 | } | ||
| 676 | |||
| 677 | vmalloc_addr += length; | ||
| 678 | vmalloc_size -= length; | ||
| 679 | } | ||
| 680 | |||
| 681 | sg->length = cpu_to_be64(i * sizeof(struct opal_sg_entry) + 16); | ||
| 682 | |||
| 683 | return first; | ||
| 684 | |||
| 685 | nomem: | ||
| 686 | pr_err("%s : Failed to allocate memory\n", __func__); | ||
| 687 | opal_free_sg_list(first); | ||
| 688 | return NULL; | ||
| 689 | } | ||
| 690 | |||
| 691 | void opal_free_sg_list(struct opal_sg_list *sg) | ||
| 692 | { | ||
| 693 | while (sg) { | ||
| 694 | uint64_t next = be64_to_cpu(sg->next); | ||
| 695 | |||
| 696 | kfree(sg); | ||
| 697 | |||
| 698 | if (next) | ||
| 699 | sg = __va(next); | ||
| 700 | else | ||
| 701 | sg = NULL; | ||
| 702 | } | ||
| 703 | } | ||
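
The two helpers added above give every OPAL user the same short calling sequence for handing firmware a scatter-gather view of a vmalloc'd buffer; the dump and flash hunks earlier in this series are both converted to it. A condensed sketch of the caller side, with opal_do_something() standing in for the real firmware call:

    int64_t opal_do_something(uint64_t sg_phys_addr);   /* hypothetical firmware call */

    static int64_t send_buffer_to_opal(void *buffer, unsigned long size)
    {
            struct opal_sg_list *list;
            int64_t rc;

            /* builds the chained SG nodes; entries are stored big-endian */
            list = opal_vmalloc_to_sg_list(buffer, size);
            if (!list)
                    return -ENOMEM;

            /* firmware is handed the physical address of the first node */
            rc = opal_do_something(__pa(list));

            opal_free_sg_list(list);
            return rc;
    }
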
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 3b2b4fb3585b..98824aa99173 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c | |||
| @@ -343,7 +343,6 @@ static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe) | |||
| 343 | pci_name(dev)); | 343 | pci_name(dev)); |
| 344 | continue; | 344 | continue; |
| 345 | } | 345 | } |
| 346 | pci_dev_get(dev); | ||
| 347 | pdn->pcidev = dev; | 346 | pdn->pcidev = dev; |
| 348 | pdn->pe_number = pe->pe_number; | 347 | pdn->pe_number = pe->pe_number; |
| 349 | pe->dma_weight += pnv_ioda_dma_weight(dev); | 348 | pe->dma_weight += pnv_ioda_dma_weight(dev); |
| @@ -462,7 +461,7 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev | |||
| 462 | 461 | ||
| 463 | pe = &phb->ioda.pe_array[pdn->pe_number]; | 462 | pe = &phb->ioda.pe_array[pdn->pe_number]; |
| 464 | WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops); | 463 | WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops); |
| 465 | set_iommu_table_base_and_group(&pdev->dev, &pe->tce32_table); | 464 | set_iommu_table_base(&pdev->dev, &pe->tce32_table); |
| 466 | } | 465 | } |
| 467 | 466 | ||
| 468 | static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb, | 467 | static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb, |
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index 61cf8fa9c61b..8723d32632f5 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c | |||
| @@ -162,18 +162,62 @@ static void pnv_shutdown(void) | |||
| 162 | } | 162 | } |
| 163 | 163 | ||
| 164 | #ifdef CONFIG_KEXEC | 164 | #ifdef CONFIG_KEXEC |
| 165 | static void pnv_kexec_wait_secondaries_down(void) | ||
| 166 | { | ||
| 167 | int my_cpu, i, notified = -1; | ||
| 168 | |||
| 169 | my_cpu = get_cpu(); | ||
| 170 | |||
| 171 | for_each_online_cpu(i) { | ||
| 172 | uint8_t status; | ||
| 173 | int64_t rc; | ||
| 174 | |||
| 175 | if (i == my_cpu) | ||
| 176 | continue; | ||
| 177 | |||
| 178 | for (;;) { | ||
| 179 | rc = opal_query_cpu_status(get_hard_smp_processor_id(i), | ||
| 180 | &status); | ||
| 181 | if (rc != OPAL_SUCCESS || status != OPAL_THREAD_STARTED) | ||
| 182 | break; | ||
| 183 | barrier(); | ||
| 184 | if (i != notified) { | ||
| 185 | printk(KERN_INFO "kexec: waiting for cpu %d " | ||
| 186 | "(physical %d) to enter OPAL\n", | ||
| 187 | i, paca[i].hw_cpu_id); | ||
| 188 | notified = i; | ||
| 189 | } | ||
| 190 | } | ||
| 191 | } | ||
| 192 | } | ||
| 193 | |||
| 165 | static void pnv_kexec_cpu_down(int crash_shutdown, int secondary) | 194 | static void pnv_kexec_cpu_down(int crash_shutdown, int secondary) |
| 166 | { | 195 | { |
| 167 | xics_kexec_teardown_cpu(secondary); | 196 | xics_kexec_teardown_cpu(secondary); |
| 168 | 197 | ||
| 169 | /* Return secondary CPUs to firmware on OPAL v3 */ | 198 | /* On OPAL v3, we return all CPUs to firmware */ |
| 170 | if (firmware_has_feature(FW_FEATURE_OPALv3) && secondary) { | 199 | |
| 200 | if (!firmware_has_feature(FW_FEATURE_OPALv3)) | ||
| 201 | return; | ||
| 202 | |||
| 203 | if (secondary) { | ||
| 204 | /* Return secondary CPUs to firmware on OPAL v3 */ | ||
| 171 | mb(); | 205 | mb(); |
| 172 | get_paca()->kexec_state = KEXEC_STATE_REAL_MODE; | 206 | get_paca()->kexec_state = KEXEC_STATE_REAL_MODE; |
| 173 | mb(); | 207 | mb(); |
| 174 | 208 | ||
| 175 | /* Return the CPU to OPAL */ | 209 | /* Return the CPU to OPAL */ |
| 176 | opal_return_cpu(); | 210 | opal_return_cpu(); |
| 211 | } else if (crash_shutdown) { | ||
| 212 | /* | ||
| 213 | * On crash, we don't wait for secondaries to go | ||
| 214 | * down as they might be unreachable or hung, so | ||
| 215 | * instead we just wait a bit and move on. | ||
| 216 | */ | ||
| 217 | mdelay(1); | ||
| 218 | } else { | ||
| 219 | /* Primary waits for the secondaries to have reached OPAL */ | ||
| 220 | pnv_kexec_wait_secondaries_down(); | ||
| 177 | } | 221 | } |
| 178 | } | 222 | } |
| 179 | #endif /* CONFIG_KEXEC */ | 223 | #endif /* CONFIG_KEXEC */ |
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c index 908672bdcea6..bf5fcd452168 100644 --- a/arch/powerpc/platforms/powernv/smp.c +++ b/arch/powerpc/platforms/powernv/smp.c | |||
| @@ -30,6 +30,7 @@ | |||
| 30 | #include <asm/cputhreads.h> | 30 | #include <asm/cputhreads.h> |
| 31 | #include <asm/xics.h> | 31 | #include <asm/xics.h> |
| 32 | #include <asm/opal.h> | 32 | #include <asm/opal.h> |
| 33 | #include <asm/runlatch.h> | ||
| 33 | 34 | ||
| 34 | #include "powernv.h" | 35 | #include "powernv.h" |
| 35 | 36 | ||
| @@ -156,7 +157,9 @@ static void pnv_smp_cpu_kill_self(void) | |||
| 156 | */ | 157 | */ |
| 157 | mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1); | 158 | mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1); |
| 158 | while (!generic_check_cpu_restart(cpu)) { | 159 | while (!generic_check_cpu_restart(cpu)) { |
| 160 | ppc64_runlatch_off(); | ||
| 159 | power7_nap(); | 161 | power7_nap(); |
| 162 | ppc64_runlatch_on(); | ||
| 160 | if (!generic_check_cpu_restart(cpu)) { | 163 | if (!generic_check_cpu_restart(cpu)) { |
| 161 | DBG("CPU%d Unexpected exit while offline !\n", cpu); | 164 | DBG("CPU%d Unexpected exit while offline !\n", cpu); |
| 162 | /* We may be getting an IPI, so we re-enable | 165 | /* We may be getting an IPI, so we re-enable |
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index 9b8e05078a63..20d62975856f 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c | |||
| @@ -88,13 +88,14 @@ void set_default_offline_state(int cpu) | |||
| 88 | 88 | ||
| 89 | static void rtas_stop_self(void) | 89 | static void rtas_stop_self(void) |
| 90 | { | 90 | { |
| 91 | struct rtas_args args = { | 91 | static struct rtas_args args = { |
| 92 | .token = cpu_to_be32(rtas_stop_self_token), | ||
| 93 | .nargs = 0, | 92 | .nargs = 0, |
| 94 | .nret = 1, | 93 | .nret = 1, |
| 95 | .rets = &args.args[0], | 94 | .rets = &args.args[0], |
| 96 | }; | 95 | }; |
| 97 | 96 | ||
| 97 | args.token = cpu_to_be32(rtas_stop_self_token); | ||
| 98 | |||
| 98 | local_irq_disable(); | 99 | local_irq_disable(); |
| 99 | 100 | ||
| 100 | BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE); | 101 | BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE); |
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index 573b488fc48b..7f75c94af822 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c | |||
| @@ -100,10 +100,10 @@ static int pseries_remove_memblock(unsigned long base, unsigned int memblock_siz | |||
| 100 | 100 | ||
| 101 | start_pfn = base >> PAGE_SHIFT; | 101 | start_pfn = base >> PAGE_SHIFT; |
| 102 | 102 | ||
| 103 | if (!pfn_valid(start_pfn)) { | 103 | lock_device_hotplug(); |
| 104 | memblock_remove(base, memblock_size); | 104 | |
| 105 | return 0; | 105 | if (!pfn_valid(start_pfn)) |
| 106 | } | 106 | goto out; |
| 107 | 107 | ||
| 108 | block_sz = memory_block_size_bytes(); | 108 | block_sz = memory_block_size_bytes(); |
| 109 | sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE; | 109 | sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE; |
| @@ -114,8 +114,10 @@ static int pseries_remove_memblock(unsigned long base, unsigned int memblock_siz | |||
| 114 | base += MIN_MEMORY_BLOCK_SIZE; | 114 | base += MIN_MEMORY_BLOCK_SIZE; |
| 115 | } | 115 | } |
| 116 | 116 | ||
| 117 | out: | ||
| 117 | /* Update memory regions for memory remove */ | 118 | /* Update memory regions for memory remove */ |
| 118 | memblock_remove(base, memblock_size); | 119 | memblock_remove(base, memblock_size); |
| 120 | unlock_device_hotplug(); | ||
| 119 | return 0; | 121 | return 0; |
| 120 | } | 122 | } |
| 121 | 123 | ||
diff --git a/arch/powerpc/sysdev/ppc4xx_pci.c b/arch/powerpc/sysdev/ppc4xx_pci.c index 64603a10b863..4914fd3f41ec 100644 --- a/arch/powerpc/sysdev/ppc4xx_pci.c +++ b/arch/powerpc/sysdev/ppc4xx_pci.c | |||
| @@ -1058,7 +1058,7 @@ static int __init apm821xx_pciex_core_init(struct device_node *np) | |||
| 1058 | return 1; | 1058 | return 1; |
| 1059 | } | 1059 | } |
| 1060 | 1060 | ||
| 1061 | static int apm821xx_pciex_init_port_hw(struct ppc4xx_pciex_port *port) | 1061 | static int __init apm821xx_pciex_init_port_hw(struct ppc4xx_pciex_port *port) |
| 1062 | { | 1062 | { |
| 1063 | u32 val; | 1063 | u32 val; |
| 1064 | 1064 | ||
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 9c36dc398f90..452d3ebd9d0f 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c | |||
| @@ -276,7 +276,6 @@ static void bpf_jit_noleaks(struct bpf_jit *jit, struct sock_filter *filter) | |||
| 276 | case BPF_S_LD_W_IND: | 276 | case BPF_S_LD_W_IND: |
| 277 | case BPF_S_LD_H_IND: | 277 | case BPF_S_LD_H_IND: |
| 278 | case BPF_S_LD_B_IND: | 278 | case BPF_S_LD_B_IND: |
| 279 | case BPF_S_LDX_B_MSH: | ||
| 280 | case BPF_S_LD_IMM: | 279 | case BPF_S_LD_IMM: |
| 281 | case BPF_S_LD_MEM: | 280 | case BPF_S_LD_MEM: |
| 282 | case BPF_S_MISC_TXA: | 281 | case BPF_S_MISC_TXA: |
diff --git a/arch/x86/Makefile b/arch/x86/Makefile index d1b7c377a234..ce6ad7e6a7d7 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile | |||
| @@ -83,7 +83,9 @@ else | |||
| 83 | KBUILD_CFLAGS += -m64 | 83 | KBUILD_CFLAGS += -m64 |
| 84 | 84 | ||
| 85 | # Don't autogenerate traditional x87, MMX or SSE instructions | 85 | # Don't autogenerate traditional x87, MMX or SSE instructions |
| 86 | KBUILD_CFLAGS += -mno-mmx -mno-sse -mno-80387 -mno-fp-ret-in-387 | 86 | KBUILD_CFLAGS += -mno-mmx -mno-sse |
| 87 | KBUILD_CFLAGS += $(call cc-option,-mno-80387) | ||
| 88 | KBUILD_CFLAGS += $(call cc-option,-mno-fp-ret-in-387) | ||
| 87 | 89 | ||
| 88 | # Use -mpreferred-stack-boundary=3 if supported. | 90 | # Use -mpreferred-stack-boundary=3 if supported. |
| 89 | KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3) | 91 | KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3) |
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 6ad4658de705..d23aa82e7a7b 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
| @@ -3425,6 +3425,11 @@ int get_nr_irqs_gsi(void) | |||
| 3425 | return nr_irqs_gsi; | 3425 | return nr_irqs_gsi; |
| 3426 | } | 3426 | } |
| 3427 | 3427 | ||
| 3428 | unsigned int arch_dynirq_lower_bound(unsigned int from) | ||
| 3429 | { | ||
| 3430 | return from < nr_irqs_gsi ? nr_irqs_gsi : from; | ||
| 3431 | } | ||
| 3432 | |||
| 3428 | int __init arch_probe_nr_irqs(void) | 3433 | int __init arch_probe_nr_irqs(void) |
| 3429 | { | 3434 | { |
| 3430 | int nr; | 3435 | int nr; |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c index 7c87424d4140..619f7699487a 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c +++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c | |||
| @@ -543,7 +543,8 @@ static int rapl_cpu_prepare(int cpu) | |||
| 543 | if (phys_id < 0) | 543 | if (phys_id < 0) |
| 544 | return -1; | 544 | return -1; |
| 545 | 545 | ||
| 546 | if (!rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits)) | 546 | /* protect rdmsrl() to handle virtualization */ |
| 547 | if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits)) | ||
| 547 | return -1; | 548 | return -1; |
| 548 | 549 | ||
| 549 | pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu)); | 550 | pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu)); |
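
The one-line RAPL fix above hinges on the return convention of rdmsrl_safe(): it returns 0 when the MSR read succeeds and non-zero when the access faults, which is what happens for MSR_RAPL_POWER_UNIT inside some guests. A small sketch of the intended check:

    static int check_rapl_unit_msr(void)
    {
            u64 units;

            /* rdmsrl_safe() returns 0 on success, non-zero if the RDMSR faulted */
            if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &units))
                    return -1;      /* not readable here, e.g. under virtualization */

            return 0;
    }
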
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c index f6584a90aba3..5edc34b5b951 100644 --- a/arch/x86/kernel/vsmp_64.c +++ b/arch/x86/kernel/vsmp_64.c | |||
| @@ -26,6 +26,9 @@ | |||
| 26 | 26 | ||
| 27 | #define TOPOLOGY_REGISTER_OFFSET 0x10 | 27 | #define TOPOLOGY_REGISTER_OFFSET 0x10 |
| 28 | 28 | ||
| 29 | /* Flag below is initialized once during vSMP PCI initialization. */ | ||
| 30 | static int irq_routing_comply = 1; | ||
| 31 | |||
| 29 | #if defined CONFIG_PCI && defined CONFIG_PARAVIRT | 32 | #if defined CONFIG_PCI && defined CONFIG_PARAVIRT |
| 30 | /* | 33 | /* |
| 31 | * Interrupt control on vSMPowered systems: | 34 | * Interrupt control on vSMPowered systems: |
| @@ -101,6 +104,10 @@ static void __init set_vsmp_pv_ops(void) | |||
| 101 | #ifdef CONFIG_SMP | 104 | #ifdef CONFIG_SMP |
| 102 | if (cap & ctl & BIT(8)) { | 105 | if (cap & ctl & BIT(8)) { |
| 103 | ctl &= ~BIT(8); | 106 | ctl &= ~BIT(8); |
| 107 | |||
| 108 | /* Interrupt routing set to ignore */ | ||
| 109 | irq_routing_comply = 0; | ||
| 110 | |||
| 104 | #ifdef CONFIG_PROC_FS | 111 | #ifdef CONFIG_PROC_FS |
| 105 | /* Don't let users change irq affinity via procfs */ | 112 | /* Don't let users change irq affinity via procfs */ |
| 106 | no_irq_affinity = 1; | 113 | no_irq_affinity = 1; |
| @@ -218,7 +225,9 @@ static void vsmp_apic_post_init(void) | |||
| 218 | { | 225 | { |
| 219 | /* need to update phys_pkg_id */ | 226 | /* need to update phys_pkg_id */ |
| 220 | apic->phys_pkg_id = apicid_phys_pkg_id; | 227 | apic->phys_pkg_id = apicid_phys_pkg_id; |
| 221 | apic->vector_allocation_domain = fill_vector_allocation_domain; | 228 | |
| 229 | if (!irq_routing_comply) | ||
| 230 | apic->vector_allocation_domain = fill_vector_allocation_domain; | ||
| 222 | } | 231 | } |
| 223 | 232 | ||
| 224 | void __init vsmp_init(void) | 233 | void __init vsmp_init(void) |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 1f68c5831924..33e8c028842f 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
| @@ -503,7 +503,7 @@ static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu) | |||
| 503 | [number##_HIGH] = VMCS12_OFFSET(name)+4 | 503 | [number##_HIGH] = VMCS12_OFFSET(name)+4 |
| 504 | 504 | ||
| 505 | 505 | ||
| 506 | static const unsigned long shadow_read_only_fields[] = { | 506 | static unsigned long shadow_read_only_fields[] = { |
| 507 | /* | 507 | /* |
| 508 | * We do NOT shadow fields that are modified when L0 | 508 | * We do NOT shadow fields that are modified when L0 |
| 509 | * traps and emulates any vmx instruction (e.g. VMPTRLD, | 509 | * traps and emulates any vmx instruction (e.g. VMPTRLD, |
| @@ -526,10 +526,10 @@ static const unsigned long shadow_read_only_fields[] = { | |||
| 526 | GUEST_LINEAR_ADDRESS, | 526 | GUEST_LINEAR_ADDRESS, |
| 527 | GUEST_PHYSICAL_ADDRESS | 527 | GUEST_PHYSICAL_ADDRESS |
| 528 | }; | 528 | }; |
| 529 | static const int max_shadow_read_only_fields = | 529 | static int max_shadow_read_only_fields = |
| 530 | ARRAY_SIZE(shadow_read_only_fields); | 530 | ARRAY_SIZE(shadow_read_only_fields); |
| 531 | 531 | ||
| 532 | static const unsigned long shadow_read_write_fields[] = { | 532 | static unsigned long shadow_read_write_fields[] = { |
| 533 | GUEST_RIP, | 533 | GUEST_RIP, |
| 534 | GUEST_RSP, | 534 | GUEST_RSP, |
| 535 | GUEST_CR0, | 535 | GUEST_CR0, |
| @@ -558,7 +558,7 @@ static const unsigned long shadow_read_write_fields[] = { | |||
| 558 | HOST_FS_SELECTOR, | 558 | HOST_FS_SELECTOR, |
| 559 | HOST_GS_SELECTOR | 559 | HOST_GS_SELECTOR |
| 560 | }; | 560 | }; |
| 561 | static const int max_shadow_read_write_fields = | 561 | static int max_shadow_read_write_fields = |
| 562 | ARRAY_SIZE(shadow_read_write_fields); | 562 | ARRAY_SIZE(shadow_read_write_fields); |
| 563 | 563 | ||
| 564 | static const unsigned short vmcs_field_to_offset_table[] = { | 564 | static const unsigned short vmcs_field_to_offset_table[] = { |
| @@ -3009,6 +3009,41 @@ static void free_kvm_area(void) | |||
| 3009 | } | 3009 | } |
| 3010 | } | 3010 | } |
| 3011 | 3011 | ||
| 3012 | static void init_vmcs_shadow_fields(void) | ||
| 3013 | { | ||
| 3014 | int i, j; | ||
| 3015 | |||
| 3016 | /* No checks for read only fields yet */ | ||
| 3017 | |||
| 3018 | for (i = j = 0; i < max_shadow_read_write_fields; i++) { | ||
| 3019 | switch (shadow_read_write_fields[i]) { | ||
| 3020 | case GUEST_BNDCFGS: | ||
| 3021 | if (!vmx_mpx_supported()) | ||
| 3022 | continue; | ||
| 3023 | break; | ||
| 3024 | default: | ||
| 3025 | break; | ||
| 3026 | } | ||
| 3027 | |||
| 3028 | if (j < i) | ||
| 3029 | shadow_read_write_fields[j] = | ||
| 3030 | shadow_read_write_fields[i]; | ||
| 3031 | j++; | ||
| 3032 | } | ||
| 3033 | max_shadow_read_write_fields = j; | ||
| 3034 | |||
| 3035 | /* shadowed fields guest access without vmexit */ | ||
| 3036 | for (i = 0; i < max_shadow_read_write_fields; i++) { | ||
| 3037 | clear_bit(shadow_read_write_fields[i], | ||
| 3038 | vmx_vmwrite_bitmap); | ||
| 3039 | clear_bit(shadow_read_write_fields[i], | ||
| 3040 | vmx_vmread_bitmap); | ||
| 3041 | } | ||
| 3042 | for (i = 0; i < max_shadow_read_only_fields; i++) | ||
| 3043 | clear_bit(shadow_read_only_fields[i], | ||
| 3044 | vmx_vmread_bitmap); | ||
| 3045 | } | ||
| 3046 | |||
| 3012 | static __init int alloc_kvm_area(void) | 3047 | static __init int alloc_kvm_area(void) |
| 3013 | { | 3048 | { |
| 3014 | int cpu; | 3049 | int cpu; |
| @@ -3039,6 +3074,8 @@ static __init int hardware_setup(void) | |||
| 3039 | enable_vpid = 0; | 3074 | enable_vpid = 0; |
| 3040 | if (!cpu_has_vmx_shadow_vmcs()) | 3075 | if (!cpu_has_vmx_shadow_vmcs()) |
| 3041 | enable_shadow_vmcs = 0; | 3076 | enable_shadow_vmcs = 0; |
| 3077 | if (enable_shadow_vmcs) | ||
| 3078 | init_vmcs_shadow_fields(); | ||
| 3042 | 3079 | ||
| 3043 | if (!cpu_has_vmx_ept() || | 3080 | if (!cpu_has_vmx_ept() || |
| 3044 | !cpu_has_vmx_ept_4levels()) { | 3081 | !cpu_has_vmx_ept_4levels()) { |
| @@ -8803,14 +8840,6 @@ static int __init vmx_init(void) | |||
| 8803 | 8840 | ||
| 8804 | memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE); | 8841 | memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE); |
| 8805 | memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE); | 8842 | memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE); |
| 8806 | /* shadowed read/write fields */ | ||
| 8807 | for (i = 0; i < max_shadow_read_write_fields; i++) { | ||
| 8808 | clear_bit(shadow_read_write_fields[i], vmx_vmwrite_bitmap); | ||
| 8809 | clear_bit(shadow_read_write_fields[i], vmx_vmread_bitmap); | ||
| 8810 | } | ||
| 8811 | /* shadowed read only fields */ | ||
| 8812 | for (i = 0; i < max_shadow_read_only_fields; i++) | ||
| 8813 | clear_bit(shadow_read_only_fields[i], vmx_vmread_bitmap); | ||
| 8814 | 8843 | ||
| 8815 | /* | 8844 | /* |
| 8816 | * Allow direct access to the PC debug port (it is often used for I/O | 8845 | * Allow direct access to the PC debug port (it is often used for I/O |
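
init_vmcs_shadow_fields() above filters the shadow field tables with a compact-in-place pass, which is why the arrays and their lengths lose their const qualifiers. The same idiom in isolation, as a sketch with a hypothetical field_supported() predicate:

    static unsigned long fields[] = { 0x800, 0x802, 0x810, 0x812 };
    static int nr_fields = ARRAY_SIZE(fields);

    static bool field_supported(unsigned long f)
    {
            return f != 0x810;              /* hypothetical: drop one unsupported field */
    }

    static void filter_fields(void)
    {
            int i, j;

            for (i = j = 0; i < nr_fields; i++) {
                    if (!field_supported(fields[i]))
                            continue;               /* skip unsupported entries */
                    if (j < i)
                            fields[j] = fields[i];  /* slide survivors down */
                    j++;
            }
            nr_fields = j;                          /* shrink the effective length */
    }
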
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index c29c2c3ec0ad..b06f5f55ada9 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c | |||
| @@ -170,6 +170,9 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr) | |||
| 170 | acpi_status status; | 170 | acpi_status status; |
| 171 | int ret; | 171 | int ret; |
| 172 | 172 | ||
| 173 | if (pr->apic_id == -1) | ||
| 174 | return -ENODEV; | ||
| 175 | |||
| 173 | status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta); | 176 | status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta); |
| 174 | if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT)) | 177 | if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT)) |
| 175 | return -ENODEV; | 178 | return -ENODEV; |
| @@ -260,10 +263,8 @@ static int acpi_processor_get_info(struct acpi_device *device) | |||
| 260 | } | 263 | } |
| 261 | 264 | ||
| 262 | apic_id = acpi_get_apicid(pr->handle, device_declaration, pr->acpi_id); | 265 | apic_id = acpi_get_apicid(pr->handle, device_declaration, pr->acpi_id); |
| 263 | if (apic_id < 0) { | 266 | if (apic_id < 0) |
| 264 | acpi_handle_debug(pr->handle, "failed to get CPU APIC ID.\n"); | 267 | acpi_handle_debug(pr->handle, "failed to get CPU APIC ID.\n"); |
| 265 | return -ENODEV; | ||
| 266 | } | ||
| 267 | pr->apic_id = apic_id; | 268 | pr->apic_id = apic_id; |
| 268 | 269 | ||
| 269 | cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id); | 270 | cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id); |
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index d7d32c28829b..ad11ba4a412d 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c | |||
| @@ -206,13 +206,13 @@ unlock: | |||
| 206 | spin_unlock_irqrestore(&ec->lock, flags); | 206 | spin_unlock_irqrestore(&ec->lock, flags); |
| 207 | } | 207 | } |
| 208 | 208 | ||
| 209 | static int acpi_ec_sync_query(struct acpi_ec *ec); | 209 | static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data); |
| 210 | 210 | ||
| 211 | static int ec_check_sci_sync(struct acpi_ec *ec, u8 state) | 211 | static int ec_check_sci_sync(struct acpi_ec *ec, u8 state) |
| 212 | { | 212 | { |
| 213 | if (state & ACPI_EC_FLAG_SCI) { | 213 | if (state & ACPI_EC_FLAG_SCI) { |
| 214 | if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) | 214 | if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) |
| 215 | return acpi_ec_sync_query(ec); | 215 | return acpi_ec_sync_query(ec, NULL); |
| 216 | } | 216 | } |
| 217 | return 0; | 217 | return 0; |
| 218 | } | 218 | } |
| @@ -443,10 +443,8 @@ acpi_handle ec_get_handle(void) | |||
| 443 | 443 | ||
| 444 | EXPORT_SYMBOL(ec_get_handle); | 444 | EXPORT_SYMBOL(ec_get_handle); |
| 445 | 445 | ||
| 446 | static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data); | ||
| 447 | |||
| 448 | /* | 446 | /* |
| 449 | * Clears stale _Q events that might have accumulated in the EC. | 447 | * Process _Q events that might have accumulated in the EC. |
| 450 | * Run with locked ec mutex. | 448 | * Run with locked ec mutex. |
| 451 | */ | 449 | */ |
| 452 | static void acpi_ec_clear(struct acpi_ec *ec) | 450 | static void acpi_ec_clear(struct acpi_ec *ec) |
| @@ -455,7 +453,7 @@ static void acpi_ec_clear(struct acpi_ec *ec) | |||
| 455 | u8 value = 0; | 453 | u8 value = 0; |
| 456 | 454 | ||
| 457 | for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) { | 455 | for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) { |
| 458 | status = acpi_ec_query_unlocked(ec, &value); | 456 | status = acpi_ec_sync_query(ec, &value); |
| 459 | if (status || !value) | 457 | if (status || !value) |
| 460 | break; | 458 | break; |
| 461 | } | 459 | } |
| @@ -582,13 +580,18 @@ static void acpi_ec_run(void *cxt) | |||
| 582 | kfree(handler); | 580 | kfree(handler); |
| 583 | } | 581 | } |
| 584 | 582 | ||
| 585 | static int acpi_ec_sync_query(struct acpi_ec *ec) | 583 | static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data) |
| 586 | { | 584 | { |
| 587 | u8 value = 0; | 585 | u8 value = 0; |
| 588 | int status; | 586 | int status; |
| 589 | struct acpi_ec_query_handler *handler, *copy; | 587 | struct acpi_ec_query_handler *handler, *copy; |
| 590 | if ((status = acpi_ec_query_unlocked(ec, &value))) | 588 | |
| 589 | status = acpi_ec_query_unlocked(ec, &value); | ||
| 590 | if (data) | ||
| 591 | *data = value; | ||
| 592 | if (status) | ||
| 591 | return status; | 593 | return status; |
| 594 | |||
| 592 | list_for_each_entry(handler, &ec->list, node) { | 595 | list_for_each_entry(handler, &ec->list, node) { |
| 593 | if (value == handler->query_bit) { | 596 | if (value == handler->query_bit) { |
| 594 | /* have custom handler for this bit */ | 597 | /* have custom handler for this bit */ |
| @@ -612,7 +615,7 @@ static void acpi_ec_gpe_query(void *ec_cxt) | |||
| 612 | if (!ec) | 615 | if (!ec) |
| 613 | return; | 616 | return; |
| 614 | mutex_lock(&ec->mutex); | 617 | mutex_lock(&ec->mutex); |
| 615 | acpi_ec_sync_query(ec); | 618 | acpi_ec_sync_query(ec, NULL); |
| 616 | mutex_unlock(&ec->mutex); | 619 | mutex_unlock(&ec->mutex); |
| 617 | } | 620 | } |
| 618 | 621 | ||
diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 8986b9f22781..62ec61e8f84a 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c | |||
| @@ -52,6 +52,7 @@ static DEFINE_MUTEX(deferred_probe_mutex); | |||
| 52 | static LIST_HEAD(deferred_probe_pending_list); | 52 | static LIST_HEAD(deferred_probe_pending_list); |
| 53 | static LIST_HEAD(deferred_probe_active_list); | 53 | static LIST_HEAD(deferred_probe_active_list); |
| 54 | static struct workqueue_struct *deferred_wq; | 54 | static struct workqueue_struct *deferred_wq; |
| 55 | static atomic_t deferred_trigger_count = ATOMIC_INIT(0); | ||
| 55 | 56 | ||
| 56 | /** | 57 | /** |
| 57 | * deferred_probe_work_func() - Retry probing devices in the active list. | 58 | * deferred_probe_work_func() - Retry probing devices in the active list. |
| @@ -135,6 +136,17 @@ static bool driver_deferred_probe_enable = false; | |||
| 135 | * This function moves all devices from the pending list to the active | 136 | * This function moves all devices from the pending list to the active |
| 136 | * list and schedules the deferred probe workqueue to process them. It | 137 | * list and schedules the deferred probe workqueue to process them. It |
| 137 | * should be called anytime a driver is successfully bound to a device. | 138 | * should be called anytime a driver is successfully bound to a device. |
| 139 | * | ||
| 140 | * Note, there is a race condition in multi-threaded probe. In the case where | ||
| 141 | * more than one device is probing at the same time, it is possible for one | ||
| 142 | * probe to complete successfully while another is about to defer. If the second | ||
| 143 | * depends on the first, then it will get put on the pending list after the | ||
| 144 | trigger event has already occurred and will be stuck there. | ||
| 145 | * | ||
| 146 | * The atomic 'deferred_trigger_count' is used to determine if a successful | ||
| 147 | * trigger has occurred in the midst of probing a driver. If the trigger count | ||
| 148 | * changes in the midst of a probe, then deferred processing should be triggered | ||
| 149 | * again. | ||
| 138 | */ | 150 | */ |
| 139 | static void driver_deferred_probe_trigger(void) | 151 | static void driver_deferred_probe_trigger(void) |
| 140 | { | 152 | { |
| @@ -147,6 +159,7 @@ static void driver_deferred_probe_trigger(void) | |||
| 147 | * into the active list so they can be retried by the workqueue | 159 | * into the active list so they can be retried by the workqueue |
| 148 | */ | 160 | */ |
| 149 | mutex_lock(&deferred_probe_mutex); | 161 | mutex_lock(&deferred_probe_mutex); |
| 162 | atomic_inc(&deferred_trigger_count); | ||
| 150 | list_splice_tail_init(&deferred_probe_pending_list, | 163 | list_splice_tail_init(&deferred_probe_pending_list, |
| 151 | &deferred_probe_active_list); | 164 | &deferred_probe_active_list); |
| 152 | mutex_unlock(&deferred_probe_mutex); | 165 | mutex_unlock(&deferred_probe_mutex); |
| @@ -265,6 +278,7 @@ static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue); | |||
| 265 | static int really_probe(struct device *dev, struct device_driver *drv) | 278 | static int really_probe(struct device *dev, struct device_driver *drv) |
| 266 | { | 279 | { |
| 267 | int ret = 0; | 280 | int ret = 0; |
| 281 | int local_trigger_count = atomic_read(&deferred_trigger_count); | ||
| 268 | 282 | ||
| 269 | atomic_inc(&probe_count); | 283 | atomic_inc(&probe_count); |
| 270 | pr_debug("bus: '%s': %s: probing driver %s with device %s\n", | 284 | pr_debug("bus: '%s': %s: probing driver %s with device %s\n", |
| @@ -310,6 +324,9 @@ probe_failed: | |||
| 310 | /* Driver requested deferred probing */ | 324 | /* Driver requested deferred probing */ |
| 311 | dev_info(dev, "Driver %s requests probe deferral\n", drv->name); | 325 | dev_info(dev, "Driver %s requests probe deferral\n", drv->name); |
| 312 | driver_deferred_probe_add(dev); | 326 | driver_deferred_probe_add(dev); |
| 327 | /* Did a trigger occur while probing? Need to re-trigger if yes */ | ||
| 328 | if (local_trigger_count != atomic_read(&deferred_trigger_count)) | ||
| 329 | driver_deferred_probe_trigger(); | ||
| 313 | } else if (ret != -ENODEV && ret != -ENXIO) { | 330 | } else if (ret != -ENODEV && ret != -ENXIO) { |
| 314 | /* driver matched but the probe failed */ | 331 | /* driver matched but the probe failed */ |
| 315 | printk(KERN_WARNING | 332 | printk(KERN_WARNING |
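
The trigger counter added above matters most when two drivers probe in parallel and one depends on the other: the dependent driver simply returns -EPROBE_DEFER and relies on the core to re-run it after the next successful bind. A minimal consumer-side sketch, with lookup_my_dependency() as a made-up helper for the resource provided by the other driver:

    void *lookup_my_dependency(struct device *dev);     /* hypothetical helper */

    static int mydev_probe(struct platform_device *pdev)
    {
            void *dep = lookup_my_dependency(&pdev->dev);

            if (!dep)
                    /* provider not bound yet; the core re-queues this probe
                     * whenever another probe completes successfully */
                    return -EPROBE_DEFER;

            /* ...normal probe path... */
            return 0;
    }
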
diff --git a/drivers/base/platform.c b/drivers/base/platform.c index e714709704e4..5b47210889e0 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #include <linux/string.h> | 13 | #include <linux/string.h> |
| 14 | #include <linux/platform_device.h> | 14 | #include <linux/platform_device.h> |
| 15 | #include <linux/of_device.h> | 15 | #include <linux/of_device.h> |
| 16 | #include <linux/of_irq.h> | ||
| 16 | #include <linux/module.h> | 17 | #include <linux/module.h> |
| 17 | #include <linux/init.h> | 18 | #include <linux/init.h> |
| 18 | #include <linux/dma-mapping.h> | 19 | #include <linux/dma-mapping.h> |
| @@ -87,7 +88,11 @@ int platform_get_irq(struct platform_device *dev, unsigned int num) | |||
| 87 | return -ENXIO; | 88 | return -ENXIO; |
| 88 | return dev->archdata.irqs[num]; | 89 | return dev->archdata.irqs[num]; |
| 89 | #else | 90 | #else |
| 90 | struct resource *r = platform_get_resource(dev, IORESOURCE_IRQ, num); | 91 | struct resource *r; |
| 92 | if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) | ||
| 93 | return of_irq_get(dev->dev.of_node, num); | ||
| 94 | |||
| 95 | r = platform_get_resource(dev, IORESOURCE_IRQ, num); | ||
| 91 | 96 | ||
| 92 | return r ? r->start : -ENXIO; | 97 | return r ? r->start : -ENXIO; |
| 93 | #endif | 98 | #endif |
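
With of_irq_get() now behind platform_get_irq(), a DT device whose interrupt parent has not been probed yet can see -EPROBE_DEFER instead of a bogus IRQ number, so callers should pass negative values straight back rather than treating them as "no interrupt". A sketch of the expected caller pattern; the driver names are invented:

    static irqreturn_t mydrv_isr(int irq, void *data)
    {
            return IRQ_HANDLED;             /* placeholder handler */
    }

    static int mydrv_probe(struct platform_device *pdev)
    {
            int irq = platform_get_irq(pdev, 0);

            /* may now be -EPROBE_DEFER if the irqchip is not ready */
            if (irq < 0)
                    return irq;             /* propagate, don't treat as "no IRQ" */

            return devm_request_irq(&pdev->dev, irq, mydrv_isr, 0,
                                    dev_name(&pdev->dev), NULL);
    }
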
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index 57e823c44d2a..5163ec13429d 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c | |||
| @@ -66,6 +66,7 @@ static int arch_timer_ppi[MAX_TIMER_PPI]; | |||
| 66 | static struct clock_event_device __percpu *arch_timer_evt; | 66 | static struct clock_event_device __percpu *arch_timer_evt; |
| 67 | 67 | ||
| 68 | static bool arch_timer_use_virtual = true; | 68 | static bool arch_timer_use_virtual = true; |
| 69 | static bool arch_timer_c3stop; | ||
| 69 | static bool arch_timer_mem_use_virtual; | 70 | static bool arch_timer_mem_use_virtual; |
| 70 | 71 | ||
| 71 | /* | 72 | /* |
| @@ -263,7 +264,8 @@ static void __arch_timer_setup(unsigned type, | |||
| 263 | clk->features = CLOCK_EVT_FEAT_ONESHOT; | 264 | clk->features = CLOCK_EVT_FEAT_ONESHOT; |
| 264 | 265 | ||
| 265 | if (type == ARCH_CP15_TIMER) { | 266 | if (type == ARCH_CP15_TIMER) { |
| 266 | clk->features |= CLOCK_EVT_FEAT_C3STOP; | 267 | if (arch_timer_c3stop) |
| 268 | clk->features |= CLOCK_EVT_FEAT_C3STOP; | ||
| 267 | clk->name = "arch_sys_timer"; | 269 | clk->name = "arch_sys_timer"; |
| 268 | clk->rating = 450; | 270 | clk->rating = 450; |
| 269 | clk->cpumask = cpumask_of(smp_processor_id()); | 271 | clk->cpumask = cpumask_of(smp_processor_id()); |
| @@ -665,6 +667,8 @@ static void __init arch_timer_init(struct device_node *np) | |||
| 665 | } | 667 | } |
| 666 | } | 668 | } |
| 667 | 669 | ||
| 670 | arch_timer_c3stop = !of_property_read_bool(np, "always-on"); | ||
| 671 | |||
| 668 | arch_timer_register(); | 672 | arch_timer_register(); |
| 669 | arch_timer_common_init(); | 673 | arch_timer_common_init(); |
| 670 | } | 674 | } |
diff --git a/drivers/clocksource/zevio-timer.c b/drivers/clocksource/zevio-timer.c index ca81809d159d..7ce442148c3f 100644 --- a/drivers/clocksource/zevio-timer.c +++ b/drivers/clocksource/zevio-timer.c | |||
| @@ -212,4 +212,9 @@ error_free: | |||
| 212 | return ret; | 212 | return ret; |
| 213 | } | 213 | } |
| 214 | 214 | ||
| 215 | CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_add); | 215 | static void __init zevio_timer_init(struct device_node *node) |
| 216 | { | ||
| 217 | BUG_ON(zevio_timer_add(node)); | ||
| 218 | } | ||
| 219 | |||
| 220 | CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_init); | ||
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c index d00e5d1abd25..5c4369b5d834 100644 --- a/drivers/cpufreq/longhaul.c +++ b/drivers/cpufreq/longhaul.c | |||
| @@ -242,7 +242,7 @@ static void do_powersaver(int cx_address, unsigned int mults_index, | |||
| 242 | * Sets a new clock ratio. | 242 | * Sets a new clock ratio. |
| 243 | */ | 243 | */ |
| 244 | 244 | ||
| 245 | static void longhaul_setstate(struct cpufreq_policy *policy, | 245 | static int longhaul_setstate(struct cpufreq_policy *policy, |
| 246 | unsigned int table_index) | 246 | unsigned int table_index) |
| 247 | { | 247 | { |
| 248 | unsigned int mults_index; | 248 | unsigned int mults_index; |
| @@ -258,10 +258,12 @@ static void longhaul_setstate(struct cpufreq_policy *policy, | |||
| 258 | /* Safety precautions */ | 258 | /* Safety precautions */ |
| 259 | mult = mults[mults_index & 0x1f]; | 259 | mult = mults[mults_index & 0x1f]; |
| 260 | if (mult == -1) | 260 | if (mult == -1) |
| 261 | return; | 261 | return -EINVAL; |
| 262 | |||
| 262 | speed = calc_speed(mult); | 263 | speed = calc_speed(mult); |
| 263 | if ((speed > highest_speed) || (speed < lowest_speed)) | 264 | if ((speed > highest_speed) || (speed < lowest_speed)) |
| 264 | return; | 265 | return -EINVAL; |
| 266 | |||
| 265 | /* Voltage transition before frequency transition? */ | 267 | /* Voltage transition before frequency transition? */ |
| 266 | if (can_scale_voltage && longhaul_index < table_index) | 268 | if (can_scale_voltage && longhaul_index < table_index) |
| 267 | dir = 1; | 269 | dir = 1; |
| @@ -269,8 +271,6 @@ static void longhaul_setstate(struct cpufreq_policy *policy, | |||
| 269 | freqs.old = calc_speed(longhaul_get_cpu_mult()); | 271 | freqs.old = calc_speed(longhaul_get_cpu_mult()); |
| 270 | freqs.new = speed; | 272 | freqs.new = speed; |
| 271 | 273 | ||
| 272 | cpufreq_freq_transition_begin(policy, &freqs); | ||
| 273 | |||
| 274 | pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", | 274 | pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", |
| 275 | fsb, mult/10, mult%10, print_speed(speed/1000)); | 275 | fsb, mult/10, mult%10, print_speed(speed/1000)); |
| 276 | retry_loop: | 276 | retry_loop: |
| @@ -385,12 +385,14 @@ retry_loop: | |||
| 385 | goto retry_loop; | 385 | goto retry_loop; |
| 386 | } | 386 | } |
| 387 | } | 387 | } |
| 388 | /* Report true CPU frequency */ | ||
| 389 | cpufreq_freq_transition_end(policy, &freqs, 0); | ||
| 390 | 388 | ||
| 391 | if (!bm_timeout) | 389 | if (!bm_timeout) { |
| 392 | printk(KERN_INFO PFX "Warning: Timeout while waiting for " | 390 | printk(KERN_INFO PFX "Warning: Timeout while waiting for " |
| 393 | "idle PCI bus.\n"); | 391 | "idle PCI bus.\n"); |
| 392 | return -EBUSY; | ||
| 393 | } | ||
| 394 | |||
| 395 | return 0; | ||
| 394 | } | 396 | } |
| 395 | 397 | ||
| 396 | /* | 398 | /* |
| @@ -631,9 +633,10 @@ static int longhaul_target(struct cpufreq_policy *policy, | |||
| 631 | unsigned int i; | 633 | unsigned int i; |
| 632 | unsigned int dir = 0; | 634 | unsigned int dir = 0; |
| 633 | u8 vid, current_vid; | 635 | u8 vid, current_vid; |
| 636 | int retval = 0; | ||
| 634 | 637 | ||
| 635 | if (!can_scale_voltage) | 638 | if (!can_scale_voltage) |
| 636 | longhaul_setstate(policy, table_index); | 639 | retval = longhaul_setstate(policy, table_index); |
| 637 | else { | 640 | else { |
| 638 | /* On test system voltage transitions exceeding single | 641 | /* On test system voltage transitions exceeding single |
| 639 | * step up or down were turning motherboard off. Both | 642 | * step up or down were turning motherboard off. Both |
| @@ -648,7 +651,7 @@ static int longhaul_target(struct cpufreq_policy *policy, | |||
| 648 | while (i != table_index) { | 651 | while (i != table_index) { |
| 649 | vid = (longhaul_table[i].driver_data >> 8) & 0x1f; | 652 | vid = (longhaul_table[i].driver_data >> 8) & 0x1f; |
| 650 | if (vid != current_vid) { | 653 | if (vid != current_vid) { |
| 651 | longhaul_setstate(policy, i); | 654 | retval = longhaul_setstate(policy, i); |
| 652 | current_vid = vid; | 655 | current_vid = vid; |
| 653 | msleep(200); | 656 | msleep(200); |
| 654 | } | 657 | } |
| @@ -657,10 +660,11 @@ static int longhaul_target(struct cpufreq_policy *policy, | |||
| 657 | else | 660 | else |
| 658 | i--; | 661 | i--; |
| 659 | } | 662 | } |
| 660 | longhaul_setstate(policy, table_index); | 663 | retval = longhaul_setstate(policy, table_index); |
| 661 | } | 664 | } |
| 665 | |||
| 662 | longhaul_index = table_index; | 666 | longhaul_index = table_index; |
| 663 | return 0; | 667 | return retval; |
| 664 | } | 668 | } |
| 665 | 669 | ||
| 666 | 670 | ||
| @@ -968,7 +972,15 @@ static void __exit longhaul_exit(void) | |||
| 968 | 972 | ||
| 969 | for (i = 0; i < numscales; i++) { | 973 | for (i = 0; i < numscales; i++) { |
| 970 | if (mults[i] == maxmult) { | 974 | if (mults[i] == maxmult) { |
| 975 | struct cpufreq_freqs freqs; | ||
| 976 | |||
| 977 | freqs.old = policy->cur; | ||
| 978 | freqs.new = longhaul_table[i].frequency; | ||
| 979 | freqs.flags = 0; | ||
| 980 | |||
| 981 | cpufreq_freq_transition_begin(policy, &freqs); | ||
| 971 | longhaul_setstate(policy, i); | 982 | longhaul_setstate(policy, i); |
| 983 | cpufreq_freq_transition_end(policy, &freqs, 0); | ||
| 972 | break; | 984 | break; |
| 973 | } | 985 | } |
| 974 | } | 986 | } |
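
The longhaul and powernow changes in this series follow the same convention: the cpufreq core now issues the transition notifiers around the driver's target callback, so only out-of-band frequency changes (such as the module-exit path above) wrap the call themselves. A condensed sketch of that wrapping, with my_set_frequency() as a stand-in for the hardware-specific setter:

    int my_set_frequency(unsigned int khz);     /* hypothetical hardware setter */

    static int switch_frequency(struct cpufreq_policy *policy,
                                unsigned int target_khz)
    {
            struct cpufreq_freqs freqs = {
                    .old   = policy->cur,       /* frequency we are leaving */
                    .new   = target_khz,        /* frequency we are switching to */
                    .flags = 0,
            };
            int ret;

            cpufreq_freq_transition_begin(policy, &freqs);
            ret = my_set_frequency(target_khz);
            cpufreq_freq_transition_end(policy, &freqs, ret != 0);

            return ret;
    }
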
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c index 49f120e1bc7b..78904e6ca4a0 100644 --- a/drivers/cpufreq/powernow-k6.c +++ b/drivers/cpufreq/powernow-k6.c | |||
| @@ -138,22 +138,14 @@ static void powernow_k6_set_cpu_multiplier(unsigned int best_i) | |||
| 138 | static int powernow_k6_target(struct cpufreq_policy *policy, | 138 | static int powernow_k6_target(struct cpufreq_policy *policy, |
| 139 | unsigned int best_i) | 139 | unsigned int best_i) |
| 140 | { | 140 | { |
| 141 | struct cpufreq_freqs freqs; | ||
| 142 | 141 | ||
| 143 | if (clock_ratio[best_i].driver_data > max_multiplier) { | 142 | if (clock_ratio[best_i].driver_data > max_multiplier) { |
| 144 | printk(KERN_ERR PFX "invalid target frequency\n"); | 143 | printk(KERN_ERR PFX "invalid target frequency\n"); |
| 145 | return -EINVAL; | 144 | return -EINVAL; |
| 146 | } | 145 | } |
| 147 | 146 | ||
| 148 | freqs.old = busfreq * powernow_k6_get_cpu_multiplier(); | ||
| 149 | freqs.new = busfreq * clock_ratio[best_i].driver_data; | ||
| 150 | |||
| 151 | cpufreq_freq_transition_begin(policy, &freqs); | ||
| 152 | |||
| 153 | powernow_k6_set_cpu_multiplier(best_i); | 147 | powernow_k6_set_cpu_multiplier(best_i); |
| 154 | 148 | ||
| 155 | cpufreq_freq_transition_end(policy, &freqs, 0); | ||
| 156 | |||
| 157 | return 0; | 149 | return 0; |
| 158 | } | 150 | } |
| 159 | 151 | ||
| @@ -227,9 +219,20 @@ have_busfreq: | |||
| 227 | static int powernow_k6_cpu_exit(struct cpufreq_policy *policy) | 219 | static int powernow_k6_cpu_exit(struct cpufreq_policy *policy) |
| 228 | { | 220 | { |
| 229 | unsigned int i; | 221 | unsigned int i; |
| 230 | for (i = 0; i < 8; i++) { | 222 | |
| 231 | if (i == max_multiplier) | 223 | for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) { |
| 224 | if (clock_ratio[i].driver_data == max_multiplier) { | ||
| 225 | struct cpufreq_freqs freqs; | ||
| 226 | |||
| 227 | freqs.old = policy->cur; | ||
| 228 | freqs.new = clock_ratio[i].frequency; | ||
| 229 | freqs.flags = 0; | ||
| 230 | |||
| 231 | cpufreq_freq_transition_begin(policy, &freqs); | ||
| 232 | powernow_k6_target(policy, i); | 232 | powernow_k6_target(policy, i); |
| 233 | cpufreq_freq_transition_end(policy, &freqs, 0); | ||
| 234 | break; | ||
| 235 | } | ||
| 233 | } | 236 | } |
| 234 | return 0; | 237 | return 0; |
| 235 | } | 238 | } |
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c index f911645c3f6d..e61e224475ad 100644 --- a/drivers/cpufreq/powernow-k7.c +++ b/drivers/cpufreq/powernow-k7.c | |||
| @@ -269,8 +269,6 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index) | |||
| 269 | 269 | ||
| 270 | freqs.new = powernow_table[index].frequency; | 270 | freqs.new = powernow_table[index].frequency; |
| 271 | 271 | ||
| 272 | cpufreq_freq_transition_begin(policy, &freqs); | ||
| 273 | |||
| 274 | /* Now do the magic poking into the MSRs. */ | 272 | /* Now do the magic poking into the MSRs. */ |
| 275 | 273 | ||
| 276 | if (have_a0 == 1) /* A0 errata 5 */ | 274 | if (have_a0 == 1) /* A0 errata 5 */ |
| @@ -290,8 +288,6 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index) | |||
| 290 | if (have_a0 == 1) | 288 | if (have_a0 == 1) |
| 291 | local_irq_enable(); | 289 | local_irq_enable(); |
| 292 | 290 | ||
| 293 | cpufreq_freq_transition_end(policy, &freqs, 0); | ||
| 294 | |||
| 295 | return 0; | 291 | return 0; |
| 296 | } | 292 | } |
| 297 | 293 | ||
diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c index a1ca3dd04a8e..0af618abebaf 100644 --- a/drivers/cpufreq/ppc-corenet-cpufreq.c +++ b/drivers/cpufreq/ppc-corenet-cpufreq.c | |||
| @@ -138,6 +138,7 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
| 138 | struct cpufreq_frequency_table *table; | 138 | struct cpufreq_frequency_table *table; |
| 139 | struct cpu_data *data; | 139 | struct cpu_data *data; |
| 140 | unsigned int cpu = policy->cpu; | 140 | unsigned int cpu = policy->cpu; |
| 141 | u64 transition_latency_hz; | ||
| 141 | 142 | ||
| 142 | np = of_get_cpu_node(cpu, NULL); | 143 | np = of_get_cpu_node(cpu, NULL); |
| 143 | if (!np) | 144 | if (!np) |
| @@ -205,8 +206,10 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
| 205 | for_each_cpu(i, per_cpu(cpu_mask, cpu)) | 206 | for_each_cpu(i, per_cpu(cpu_mask, cpu)) |
| 206 | per_cpu(cpu_data, i) = data; | 207 | per_cpu(cpu_data, i) = data; |
| 207 | 208 | ||
| 209 | transition_latency_hz = 12ULL * NSEC_PER_SEC; | ||
| 208 | policy->cpuinfo.transition_latency = | 210 | policy->cpuinfo.transition_latency = |
| 209 | (12ULL * NSEC_PER_SEC) / fsl_get_sys_freq(); | 211 | do_div(transition_latency_hz, fsl_get_sys_freq()); |
| 212 | |||
| 210 | of_node_put(np); | 213 | of_node_put(np); |
| 211 | 214 | ||
| 212 | return 0; | 215 | return 0; |
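do_div() divides its 64-bit first argument in place by a 32-bit divisor and returns the remainder, which is why a u64 temporary is introduced above. A small sketch of the idiom (names and values illustrative only):

#include <linux/types.h>
#include <linux/time.h>      /* NSEC_PER_SEC */
#include <asm/div64.h>       /* do_div() */

/* Sketch only: after do_div() the variable holds the quotient and the
 * macro's value is the remainder.
 */
static unsigned int example_latency_ns(unsigned int sys_freq_hz)
{
        u64 latency = 12ULL * NSEC_PER_SEC;
        u32 rem;

        rem = do_div(latency, sys_freq_hz);   /* latency /= sys_freq_hz */
        (void)rem;
        return latency;
}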
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index e930d4fe29c7..1ef5ab9c9d51 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c | |||
| @@ -145,6 +145,7 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
| 145 | 145 | ||
| 146 | plane->crtc = crtc; | 146 | plane->crtc = crtc; |
| 147 | plane->fb = crtc->primary->fb; | 147 | plane->fb = crtc->primary->fb; |
| 148 | drm_framebuffer_reference(plane->fb); | ||
| 148 | 149 | ||
| 149 | return 0; | 150 | return 0; |
| 150 | } | 151 | } |
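The one-line addition above follows the DRM framebuffer refcount rule: every long-lived pointer to a framebuffer needs its own reference. A hedged sketch of that rule with placeholder names, not code from this patch:

#include <drm/drm_crtc.h>

/* Sketch only: take a reference before publishing the pointer, drop the
 * old one when it is replaced.
 */
static void example_set_plane_fb(struct drm_plane *plane,
                                 struct drm_framebuffer *new_fb)
{
        if (new_fb)
                drm_framebuffer_reference(new_fb);
        if (plane->fb)
                drm_framebuffer_unreference(plane->fb);
        plane->fb = new_fb;
}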
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c index c786cd4f457b..2a3ad24276f8 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c | |||
| @@ -263,7 +263,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev, | |||
| 263 | buffer->sgt = sgt; | 263 | buffer->sgt = sgt; |
| 264 | exynos_gem_obj->base.import_attach = attach; | 264 | exynos_gem_obj->base.import_attach = attach; |
| 265 | 265 | ||
| 266 | DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr, | 266 | DRM_DEBUG_PRIME("dma_addr = %pad, size = 0x%lx\n", &buffer->dma_addr, |
| 267 | buffer->size); | 267 | buffer->size); |
| 268 | 268 | ||
| 269 | return &exynos_gem_obj->base; | 269 | return &exynos_gem_obj->base; |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index eb73e3bf2a0c..4ac438187568 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c | |||
| @@ -1426,9 +1426,9 @@ static int exynos_dsi_probe(struct platform_device *pdev) | |||
| 1426 | 1426 | ||
| 1427 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1427 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 1428 | dsi->reg_base = devm_ioremap_resource(&pdev->dev, res); | 1428 | dsi->reg_base = devm_ioremap_resource(&pdev->dev, res); |
| 1429 | if (!dsi->reg_base) { | 1429 | if (IS_ERR(dsi->reg_base)) { |
| 1430 | dev_err(&pdev->dev, "failed to remap io region\n"); | 1430 | dev_err(&pdev->dev, "failed to remap io region\n"); |
| 1431 | return -EADDRNOTAVAIL; | 1431 | return PTR_ERR(dsi->reg_base); |
| 1432 | } | 1432 | } |
| 1433 | 1433 | ||
| 1434 | dsi->phy = devm_phy_get(&pdev->dev, "dsim"); | 1434 | dsi->phy = devm_phy_get(&pdev->dev, "dsim"); |
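devm_ioremap_resource() reports failure with ERR_PTR()-encoded pointers rather than NULL, so IS_ERR()/PTR_ERR() is the correct check, as the fix above applies. A minimal probe sketch with placeholder names:

#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>

/* Sketch only: devm_ioremap_resource() never returns NULL on failure. */
static int example_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *base;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);   /* e.g. -EBUSY, -ENOMEM, -EINVAL */

        /* use "base" for register access */
        return 0;
}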
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index 7afead9c3f30..852f2dadaebd 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c | |||
| @@ -220,7 +220,7 @@ static void vidi_win_commit(struct exynos_drm_manager *mgr, int zpos) | |||
| 220 | 220 | ||
| 221 | win_data->enabled = true; | 221 | win_data->enabled = true; |
| 222 | 222 | ||
| 223 | DRM_DEBUG_KMS("dma_addr = 0x%x\n", win_data->dma_addr); | 223 | DRM_DEBUG_KMS("dma_addr = %pad\n", &win_data->dma_addr); |
| 224 | 224 | ||
| 225 | if (ctx->vblank_on) | 225 | if (ctx->vblank_on) |
| 226 | schedule_work(&ctx->work); | 226 | schedule_work(&ctx->work); |
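Both exynos hunks above switch to the %pad printk specifier: dma_addr_t may be wider than unsigned long (for example with LPAE), so the value is passed by reference and the kernel prints the right width. A minimal sketch, names hypothetical:

#include <linux/printk.h>
#include <linux/types.h>

/* Sketch only: %pad takes a pointer to the dma_addr_t, never the value. */
static void example_log_buffer(dma_addr_t dma_addr, unsigned long size)
{
        pr_debug("dma_addr = %pad, size = 0x%lx\n", &dma_addr, size);
}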
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index ab5e93c30aa2..62a5c3627b90 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
| @@ -50,7 +50,7 @@ bool intel_enable_ppgtt(struct drm_device *dev, bool full) | |||
| 50 | 50 | ||
| 51 | /* Full ppgtt disabled by default for now due to issues. */ | 51 | /* Full ppgtt disabled by default for now due to issues. */ |
| 52 | if (full) | 52 | if (full) |
| 53 | return false; /* HAS_PPGTT(dev) */ | 53 | return HAS_PPGTT(dev) && (i915.enable_ppgtt == 2); |
| 54 | else | 54 | else |
| 55 | return HAS_ALIASING_PPGTT(dev); | 55 | return HAS_ALIASING_PPGTT(dev); |
| 56 | } | 56 | } |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 7753249b3a95..f98ba4e6e70b 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
| @@ -1362,10 +1362,20 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev, | |||
| 1362 | spin_lock(&dev_priv->irq_lock); | 1362 | spin_lock(&dev_priv->irq_lock); |
| 1363 | for (i = 1; i < HPD_NUM_PINS; i++) { | 1363 | for (i = 1; i < HPD_NUM_PINS; i++) { |
| 1364 | 1364 | ||
| 1365 | WARN_ONCE(hpd[i] & hotplug_trigger && | 1365 | if (hpd[i] & hotplug_trigger && |
| 1366 | dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED, | 1366 | dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) { |
| 1367 | "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n", | 1367 | /* |
| 1368 | hotplug_trigger, i, hpd[i]); | 1368 | * On GMCH platforms the interrupt mask bits only |
| 1369 | * prevent irq generation, not the setting of the | ||
| 1370 | * hotplug bits itself. So only WARN about unexpected | ||
| 1371 | * interrupts on saner platforms. | ||
| 1372 | */ | ||
| 1373 | WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev), | ||
| 1374 | "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n", | ||
| 1375 | hotplug_trigger, i, hpd[i]); | ||
| 1376 | |||
| 1377 | continue; | ||
| 1378 | } | ||
| 1369 | 1379 | ||
| 1370 | if (!(hpd[i] & hotplug_trigger) || | 1380 | if (!(hpd[i] & hotplug_trigger) || |
| 1371 | dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) | 1381 | dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 9f5b18d9d885..c77af69c2d8f 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
| @@ -827,6 +827,7 @@ enum punit_power_well { | |||
| 827 | # define MI_FLUSH_ENABLE (1 << 12) | 827 | # define MI_FLUSH_ENABLE (1 << 12) |
| 828 | # define ASYNC_FLIP_PERF_DISABLE (1 << 14) | 828 | # define ASYNC_FLIP_PERF_DISABLE (1 << 14) |
| 829 | # define MODE_IDLE (1 << 9) | 829 | # define MODE_IDLE (1 << 9) |
| 830 | # define STOP_RING (1 << 8) | ||
| 830 | 831 | ||
| 831 | #define GEN6_GT_MODE 0x20d0 | 832 | #define GEN6_GT_MODE 0x20d0 |
| 832 | #define GEN7_GT_MODE 0x7008 | 833 | #define GEN7_GT_MODE 0x7008 |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index dae976f51d83..69bcc42a0e44 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -9654,11 +9654,22 @@ intel_pipe_config_compare(struct drm_device *dev, | |||
| 9654 | PIPE_CONF_CHECK_I(pipe_src_w); | 9654 | PIPE_CONF_CHECK_I(pipe_src_w); |
| 9655 | PIPE_CONF_CHECK_I(pipe_src_h); | 9655 | PIPE_CONF_CHECK_I(pipe_src_h); |
| 9656 | 9656 | ||
| 9657 | PIPE_CONF_CHECK_I(gmch_pfit.control); | 9657 | /* |
| 9658 | /* pfit ratios are autocomputed by the hw on gen4+ */ | 9658 | * FIXME: BIOS likes to set up a cloned config with lvds+external |
| 9659 | if (INTEL_INFO(dev)->gen < 4) | 9659 | * screen. Since we don't yet re-compute the pipe config when moving |
| 9660 | PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios); | 9660 | * just the lvds port away to another pipe the sw tracking won't match. |
| 9661 | PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits); | 9661 | * |
| 9662 | * Proper atomic modesets with recomputed global state will fix this. | ||
| 9663 | * Until then just don't check gmch state for inherited modes. | ||
| 9664 | */ | ||
| 9665 | if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) { | ||
| 9666 | PIPE_CONF_CHECK_I(gmch_pfit.control); | ||
| 9667 | /* pfit ratios are autocomputed by the hw on gen4+ */ | ||
| 9668 | if (INTEL_INFO(dev)->gen < 4) | ||
| 9669 | PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios); | ||
| 9670 | PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits); | ||
| 9671 | } | ||
| 9672 | |||
| 9662 | PIPE_CONF_CHECK_I(pch_pfit.enabled); | 9673 | PIPE_CONF_CHECK_I(pch_pfit.enabled); |
| 9663 | if (current_config->pch_pfit.enabled) { | 9674 | if (current_config->pch_pfit.enabled) { |
| 9664 | PIPE_CONF_CHECK_I(pch_pfit.pos); | 9675 | PIPE_CONF_CHECK_I(pch_pfit.pos); |
| @@ -11616,6 +11627,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) | |||
| 11616 | base.head) { | 11627 | base.head) { |
| 11617 | memset(&crtc->config, 0, sizeof(crtc->config)); | 11628 | memset(&crtc->config, 0, sizeof(crtc->config)); |
| 11618 | 11629 | ||
| 11630 | crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE; | ||
| 11631 | |||
| 11619 | crtc->active = dev_priv->display.get_pipe_config(crtc, | 11632 | crtc->active = dev_priv->display.get_pipe_config(crtc, |
| 11620 | &crtc->config); | 11633 | &crtc->config); |
| 11621 | 11634 | ||
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index d2a55884ad52..dfa85289f28f 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
| @@ -3619,7 +3619,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, | |||
| 3619 | { | 3619 | { |
| 3620 | struct drm_connector *connector = &intel_connector->base; | 3620 | struct drm_connector *connector = &intel_connector->base; |
| 3621 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | 3621 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
| 3622 | struct drm_device *dev = intel_dig_port->base.base.dev; | 3622 | struct intel_encoder *intel_encoder = &intel_dig_port->base; |
| 3623 | struct drm_device *dev = intel_encoder->base.dev; | ||
| 3623 | struct drm_i915_private *dev_priv = dev->dev_private; | 3624 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 3624 | struct drm_display_mode *fixed_mode = NULL; | 3625 | struct drm_display_mode *fixed_mode = NULL; |
| 3625 | bool has_dpcd; | 3626 | bool has_dpcd; |
| @@ -3629,6 +3630,14 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, | |||
| 3629 | if (!is_edp(intel_dp)) | 3630 | if (!is_edp(intel_dp)) |
| 3630 | return true; | 3631 | return true; |
| 3631 | 3632 | ||
| 3633 | /* The VDD bit needs a power domain reference, so if the bit is already | ||
| 3634 | * enabled when we boot, grab this reference. */ | ||
| 3635 | if (edp_have_panel_vdd(intel_dp)) { | ||
| 3636 | enum intel_display_power_domain power_domain; | ||
| 3637 | power_domain = intel_display_port_power_domain(intel_encoder); | ||
| 3638 | intel_display_power_get(dev_priv, power_domain); | ||
| 3639 | } | ||
| 3640 | |||
| 3632 | /* Cache DPCD and EDID for edp. */ | 3641 | /* Cache DPCD and EDID for edp. */ |
| 3633 | intel_edp_panel_vdd_on(intel_dp); | 3642 | intel_edp_panel_vdd_on(intel_dp); |
| 3634 | has_dpcd = intel_dp_get_dpcd(intel_dp); | 3643 | has_dpcd = intel_dp_get_dpcd(intel_dp); |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 0542de982260..328b1a70264b 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
| @@ -236,7 +236,8 @@ struct intel_crtc_config { | |||
| 236 | * tracked with quirk flags so that fastboot and state checker can act | 236 | * tracked with quirk flags so that fastboot and state checker can act |
| 237 | * accordingly. | 237 | * accordingly. |
| 238 | */ | 238 | */ |
| 239 | #define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */ | 239 | #define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */ |
| 240 | #define PIPE_CONFIG_QUIRK_INHERITED_MODE (1<<1) /* mode inherited from firmware */ | ||
| 240 | unsigned long quirks; | 241 | unsigned long quirks; |
| 241 | 242 | ||
| 242 | /* User requested mode, only valid as a starting point to | 243 | /* User requested mode, only valid as a starting point to |
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index b4d44e62f0c7..fce4a0d93c0b 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c | |||
| @@ -132,6 +132,16 @@ static int intelfb_create(struct drm_fb_helper *helper, | |||
| 132 | 132 | ||
| 133 | mutex_lock(&dev->struct_mutex); | 133 | mutex_lock(&dev->struct_mutex); |
| 134 | 134 | ||
| 135 | if (intel_fb && | ||
| 136 | (sizes->fb_width > intel_fb->base.width || | ||
| 137 | sizes->fb_height > intel_fb->base.height)) { | ||
| 138 | DRM_DEBUG_KMS("BIOS fb too small (%dx%d), we require (%dx%d)," | ||
| 139 | " releasing it\n", | ||
| 140 | intel_fb->base.width, intel_fb->base.height, | ||
| 141 | sizes->fb_width, sizes->fb_height); | ||
| 142 | drm_framebuffer_unreference(&intel_fb->base); | ||
| 143 | intel_fb = ifbdev->fb = NULL; | ||
| 144 | } | ||
| 135 | if (!intel_fb || WARN_ON(!intel_fb->obj)) { | 145 | if (!intel_fb || WARN_ON(!intel_fb->obj)) { |
| 136 | DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n"); | 146 | DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n"); |
| 137 | ret = intelfb_alloc(helper, sizes); | 147 | ret = intelfb_alloc(helper, sizes); |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index b0413e190625..157267aa3561 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
| @@ -821,11 +821,11 @@ static void intel_disable_hdmi(struct intel_encoder *encoder) | |||
| 821 | } | 821 | } |
| 822 | } | 822 | } |
| 823 | 823 | ||
| 824 | static int hdmi_portclock_limit(struct intel_hdmi *hdmi) | 824 | static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit) |
| 825 | { | 825 | { |
| 826 | struct drm_device *dev = intel_hdmi_to_dev(hdmi); | 826 | struct drm_device *dev = intel_hdmi_to_dev(hdmi); |
| 827 | 827 | ||
| 828 | if (!hdmi->has_hdmi_sink || IS_G4X(dev)) | 828 | if ((respect_dvi_limit && !hdmi->has_hdmi_sink) || IS_G4X(dev)) |
| 829 | return 165000; | 829 | return 165000; |
| 830 | else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) | 830 | else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) |
| 831 | return 300000; | 831 | return 300000; |
| @@ -837,7 +837,8 @@ static enum drm_mode_status | |||
| 837 | intel_hdmi_mode_valid(struct drm_connector *connector, | 837 | intel_hdmi_mode_valid(struct drm_connector *connector, |
| 838 | struct drm_display_mode *mode) | 838 | struct drm_display_mode *mode) |
| 839 | { | 839 | { |
| 840 | if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector))) | 840 | if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector), |
| 841 | true)) | ||
| 841 | return MODE_CLOCK_HIGH; | 842 | return MODE_CLOCK_HIGH; |
| 842 | if (mode->clock < 20000) | 843 | if (mode->clock < 20000) |
| 843 | return MODE_CLOCK_LOW; | 844 | return MODE_CLOCK_LOW; |
| @@ -879,7 +880,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, | |||
| 879 | struct drm_device *dev = encoder->base.dev; | 880 | struct drm_device *dev = encoder->base.dev; |
| 880 | struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; | 881 | struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; |
| 881 | int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2; | 882 | int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2; |
| 882 | int portclock_limit = hdmi_portclock_limit(intel_hdmi); | 883 | int portclock_limit = hdmi_portclock_limit(intel_hdmi, false); |
| 883 | int desired_bpp; | 884 | int desired_bpp; |
| 884 | 885 | ||
| 885 | if (intel_hdmi->color_range_auto) { | 886 | if (intel_hdmi->color_range_auto) { |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 6bc68bdcf433..79fb4cc2137c 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
| @@ -437,32 +437,41 @@ static void ring_setup_phys_status_page(struct intel_ring_buffer *ring) | |||
| 437 | I915_WRITE(HWS_PGA, addr); | 437 | I915_WRITE(HWS_PGA, addr); |
| 438 | } | 438 | } |
| 439 | 439 | ||
| 440 | static int init_ring_common(struct intel_ring_buffer *ring) | 440 | static bool stop_ring(struct intel_ring_buffer *ring) |
| 441 | { | 441 | { |
| 442 | struct drm_device *dev = ring->dev; | 442 | struct drm_i915_private *dev_priv = to_i915(ring->dev); |
| 443 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 444 | struct drm_i915_gem_object *obj = ring->obj; | ||
| 445 | int ret = 0; | ||
| 446 | u32 head; | ||
| 447 | 443 | ||
| 448 | gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); | 444 | if (!IS_GEN2(ring->dev)) { |
| 445 | I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING)); | ||
| 446 | if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) { | ||
| 447 | DRM_ERROR("%s :timed out trying to stop ring\n", ring->name); | ||
| 448 | return false; | ||
| 449 | } | ||
| 450 | } | ||
| 449 | 451 | ||
| 450 | /* Stop the ring if it's running. */ | ||
| 451 | I915_WRITE_CTL(ring, 0); | 452 | I915_WRITE_CTL(ring, 0); |
| 452 | I915_WRITE_HEAD(ring, 0); | 453 | I915_WRITE_HEAD(ring, 0); |
| 453 | ring->write_tail(ring, 0); | 454 | ring->write_tail(ring, 0); |
| 454 | if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) | ||
| 455 | DRM_ERROR("%s :timed out trying to stop ring\n", ring->name); | ||
| 456 | 455 | ||
| 457 | if (I915_NEED_GFX_HWS(dev)) | 456 | if (!IS_GEN2(ring->dev)) { |
| 458 | intel_ring_setup_status_page(ring); | 457 | (void)I915_READ_CTL(ring); |
| 459 | else | 458 | I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING)); |
| 460 | ring_setup_phys_status_page(ring); | 459 | } |
| 461 | 460 | ||
| 462 | head = I915_READ_HEAD(ring) & HEAD_ADDR; | 461 | return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0; |
| 462 | } | ||
| 463 | 463 | ||
| 464 | /* G45 ring initialization fails to reset head to zero */ | 464 | static int init_ring_common(struct intel_ring_buffer *ring) |
| 465 | if (head != 0) { | 465 | { |
| 466 | struct drm_device *dev = ring->dev; | ||
| 467 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 468 | struct drm_i915_gem_object *obj = ring->obj; | ||
| 469 | int ret = 0; | ||
| 470 | |||
| 471 | gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); | ||
| 472 | |||
| 473 | if (!stop_ring(ring)) { | ||
| 474 | /* G45 ring initialization often fails to reset head to zero */ | ||
| 466 | DRM_DEBUG_KMS("%s head not reset to zero " | 475 | DRM_DEBUG_KMS("%s head not reset to zero " |
| 467 | "ctl %08x head %08x tail %08x start %08x\n", | 476 | "ctl %08x head %08x tail %08x start %08x\n", |
| 468 | ring->name, | 477 | ring->name, |
| @@ -471,9 +480,7 @@ static int init_ring_common(struct intel_ring_buffer *ring) | |||
| 471 | I915_READ_TAIL(ring), | 480 | I915_READ_TAIL(ring), |
| 472 | I915_READ_START(ring)); | 481 | I915_READ_START(ring)); |
| 473 | 482 | ||
| 474 | I915_WRITE_HEAD(ring, 0); | 483 | if (!stop_ring(ring)) { |
| 475 | |||
| 476 | if (I915_READ_HEAD(ring) & HEAD_ADDR) { | ||
| 477 | DRM_ERROR("failed to set %s head to zero " | 484 | DRM_ERROR("failed to set %s head to zero " |
| 478 | "ctl %08x head %08x tail %08x start %08x\n", | 485 | "ctl %08x head %08x tail %08x start %08x\n", |
| 479 | ring->name, | 486 | ring->name, |
| @@ -481,9 +488,16 @@ static int init_ring_common(struct intel_ring_buffer *ring) | |||
| 481 | I915_READ_HEAD(ring), | 488 | I915_READ_HEAD(ring), |
| 482 | I915_READ_TAIL(ring), | 489 | I915_READ_TAIL(ring), |
| 483 | I915_READ_START(ring)); | 490 | I915_READ_START(ring)); |
| 491 | ret = -EIO; | ||
| 492 | goto out; | ||
| 484 | } | 493 | } |
| 485 | } | 494 | } |
| 486 | 495 | ||
| 496 | if (I915_NEED_GFX_HWS(dev)) | ||
| 497 | intel_ring_setup_status_page(ring); | ||
| 498 | else | ||
| 499 | ring_setup_phys_status_page(ring); | ||
| 500 | |||
| 487 | /* Initialize the ring. This must happen _after_ we've cleared the ring | 501 | /* Initialize the ring. This must happen _after_ we've cleared the ring |
| 488 | * registers with the above sequence (the readback of the HEAD registers | 502 | * registers with the above sequence (the readback of the HEAD registers |
| 489 | * also enforces ordering), otherwise the hw might lose the new ring | 503 | * also enforces ordering), otherwise the hw might lose the new ring |
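The new stop_ring() helper relies on i915's masked-bit register convention: the high 16 bits of a write select which low bits are actually updated, so STOP_RING can be toggled without disturbing the rest of MI_MODE. A sketch of the convention (the EXAMPLE_ names are placeholders for the driver's own _MASKED_BIT_* macros):

/* Sketch only: masked-bit writes, as used by I915_WRITE_MODE() above. */
#define EXAMPLE_MASKED_BIT_ENABLE(a)    (((a) << 16) | (a))   /* set bit   */
#define EXAMPLE_MASKED_BIT_DISABLE(a)   ((a) << 16)           /* clear bit */

/*
 * I915_WRITE_MODE(ring, EXAMPLE_MASKED_BIT_ENABLE(STOP_RING));
 *      ... wait for MODE_IDLE ...
 * I915_WRITE_MODE(ring, EXAMPLE_MASKED_BIT_DISABLE(STOP_RING));
 */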
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 270a6a973438..2b91c4b4d34b 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
| @@ -34,6 +34,7 @@ struct intel_hw_status_page { | |||
| 34 | #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) | 34 | #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) |
| 35 | 35 | ||
| 36 | #define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base)) | 36 | #define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base)) |
| 37 | #define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val) | ||
| 37 | 38 | ||
| 38 | enum intel_ring_hangcheck_action { | 39 | enum intel_ring_hangcheck_action { |
| 39 | HANGCHECK_IDLE = 0, | 40 | HANGCHECK_IDLE = 0, |
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c index 3e6c0f3ed592..ef9957dbac94 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c | |||
| @@ -510,9 +510,8 @@ static void update_cursor(struct drm_crtc *crtc) | |||
| 510 | MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN); | 510 | MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN); |
| 511 | } else { | 511 | } else { |
| 512 | /* disable cursor: */ | 512 | /* disable cursor: */ |
| 513 | mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), 0); | 513 | mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), |
| 514 | mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma), | 514 | mdp4_kms->blank_cursor_iova); |
| 515 | MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB)); | ||
| 516 | } | 515 | } |
| 517 | 516 | ||
| 518 | /* and drop the iova ref + obj rev when done scanning out: */ | 517 | /* and drop the iova ref + obj rev when done scanning out: */ |
| @@ -574,11 +573,9 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc, | |||
| 574 | 573 | ||
| 575 | if (old_bo) { | 574 | if (old_bo) { |
| 576 | /* drop our previous reference: */ | 575 | /* drop our previous reference: */ |
| 577 | msm_gem_put_iova(old_bo, mdp4_kms->id); | 576 | drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, old_bo); |
| 578 | drm_gem_object_unreference_unlocked(old_bo); | ||
| 579 | } | 577 | } |
| 580 | 578 | ||
| 581 | crtc_flush(crtc); | ||
| 582 | request_pending(crtc, PENDING_CURSOR); | 579 | request_pending(crtc, PENDING_CURSOR); |
| 583 | 580 | ||
| 584 | return 0; | 581 | return 0; |
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c index c740ccd1cc67..8edd531cb621 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c | |||
| @@ -70,12 +70,12 @@ irqreturn_t mdp4_irq(struct msm_kms *kms) | |||
| 70 | 70 | ||
| 71 | VERB("status=%08x", status); | 71 | VERB("status=%08x", status); |
| 72 | 72 | ||
| 73 | mdp_dispatch_irqs(mdp_kms, status); | ||
| 74 | |||
| 73 | for (id = 0; id < priv->num_crtcs; id++) | 75 | for (id = 0; id < priv->num_crtcs; id++) |
| 74 | if (status & mdp4_crtc_vblank(priv->crtcs[id])) | 76 | if (status & mdp4_crtc_vblank(priv->crtcs[id])) |
| 75 | drm_handle_vblank(dev, id); | 77 | drm_handle_vblank(dev, id); |
| 76 | 78 | ||
| 77 | mdp_dispatch_irqs(mdp_kms, status); | ||
| 78 | |||
| 79 | return IRQ_HANDLED; | 79 | return IRQ_HANDLED; |
| 80 | } | 80 | } |
| 81 | 81 | ||
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c index 272e707c9487..0bb4faa17523 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | |||
| @@ -144,6 +144,10 @@ static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file) | |||
| 144 | static void mdp4_destroy(struct msm_kms *kms) | 144 | static void mdp4_destroy(struct msm_kms *kms) |
| 145 | { | 145 | { |
| 146 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); | 146 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); |
| 147 | if (mdp4_kms->blank_cursor_iova) | ||
| 148 | msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id); | ||
| 149 | if (mdp4_kms->blank_cursor_bo) | ||
| 150 | drm_gem_object_unreference(mdp4_kms->blank_cursor_bo); | ||
| 147 | kfree(mdp4_kms); | 151 | kfree(mdp4_kms); |
| 148 | } | 152 | } |
| 149 | 153 | ||
| @@ -372,6 +376,23 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) | |||
| 372 | goto fail; | 376 | goto fail; |
| 373 | } | 377 | } |
| 374 | 378 | ||
| 379 | mutex_lock(&dev->struct_mutex); | ||
| 380 | mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC); | ||
| 381 | mutex_unlock(&dev->struct_mutex); | ||
| 382 | if (IS_ERR(mdp4_kms->blank_cursor_bo)) { | ||
| 383 | ret = PTR_ERR(mdp4_kms->blank_cursor_bo); | ||
| 384 | dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret); | ||
| 385 | mdp4_kms->blank_cursor_bo = NULL; | ||
| 386 | goto fail; | ||
| 387 | } | ||
| 388 | |||
| 389 | ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id, | ||
| 390 | &mdp4_kms->blank_cursor_iova); | ||
| 391 | if (ret) { | ||
| 392 | dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret); | ||
| 393 | goto fail; | ||
| 394 | } | ||
| 395 | |||
| 375 | return kms; | 396 | return kms; |
| 376 | 397 | ||
| 377 | fail: | 398 | fail: |
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h index 66a4d31aec80..715520c54cde 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h | |||
| @@ -44,6 +44,10 @@ struct mdp4_kms { | |||
| 44 | struct clk *lut_clk; | 44 | struct clk *lut_clk; |
| 45 | 45 | ||
| 46 | struct mdp_irq error_handler; | 46 | struct mdp_irq error_handler; |
| 47 | |||
| 48 | /* empty/blank cursor bo to use when cursor is "disabled" */ | ||
| 49 | struct drm_gem_object *blank_cursor_bo; | ||
| 50 | uint32_t blank_cursor_iova; | ||
| 47 | }; | 51 | }; |
| 48 | #define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base) | 52 | #define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base) |
| 49 | 53 | ||
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c index 353d494a497f..f2b985bc2adf 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c | |||
| @@ -71,11 +71,11 @@ static void mdp5_irq_mdp(struct mdp_kms *mdp_kms) | |||
| 71 | 71 | ||
| 72 | VERB("status=%08x", status); | 72 | VERB("status=%08x", status); |
| 73 | 73 | ||
| 74 | mdp_dispatch_irqs(mdp_kms, status); | ||
| 75 | |||
| 74 | for (id = 0; id < priv->num_crtcs; id++) | 76 | for (id = 0; id < priv->num_crtcs; id++) |
| 75 | if (status & mdp5_crtc_vblank(priv->crtcs[id])) | 77 | if (status & mdp5_crtc_vblank(priv->crtcs[id])) |
| 76 | drm_handle_vblank(dev, id); | 78 | drm_handle_vblank(dev, id); |
| 77 | |||
| 78 | mdp_dispatch_irqs(mdp_kms, status); | ||
| 79 | } | 79 | } |
| 80 | 80 | ||
| 81 | irqreturn_t mdp5_irq(struct msm_kms *kms) | 81 | irqreturn_t mdp5_irq(struct msm_kms *kms) |
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index 6c6d7d4c9b4e..a752ab83b810 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c | |||
| @@ -62,11 +62,8 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, | |||
| 62 | dma_addr_t paddr; | 62 | dma_addr_t paddr; |
| 63 | int ret, size; | 63 | int ret, size; |
| 64 | 64 | ||
| 65 | /* only doing ARGB32 since this is what is needed to alpha-blend | ||
| 66 | * with video overlays: | ||
| 67 | */ | ||
| 68 | sizes->surface_bpp = 32; | 65 | sizes->surface_bpp = 32; |
| 69 | sizes->surface_depth = 32; | 66 | sizes->surface_depth = 24; |
| 70 | 67 | ||
| 71 | DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width, | 68 | DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width, |
| 72 | sizes->surface_height, sizes->surface_bpp, | 69 | sizes->surface_height, sizes->surface_bpp, |
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 3da8264d3039..bb8026daebc9 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c | |||
| @@ -118,8 +118,10 @@ static void put_pages(struct drm_gem_object *obj) | |||
| 118 | 118 | ||
| 119 | if (iommu_present(&platform_bus_type)) | 119 | if (iommu_present(&platform_bus_type)) |
| 120 | drm_gem_put_pages(obj, msm_obj->pages, true, false); | 120 | drm_gem_put_pages(obj, msm_obj->pages, true, false); |
| 121 | else | 121 | else { |
| 122 | drm_mm_remove_node(msm_obj->vram_node); | 122 | drm_mm_remove_node(msm_obj->vram_node); |
| 123 | drm_free_large(msm_obj->pages); | ||
| 124 | } | ||
| 123 | 125 | ||
| 124 | msm_obj->pages = NULL; | 126 | msm_obj->pages = NULL; |
| 125 | } | 127 | } |
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index 36c717af6cf9..edb871d7d395 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c | |||
| @@ -312,7 +312,7 @@ static void tegra_crtc_disable(struct drm_crtc *crtc) | |||
| 312 | struct drm_device *drm = crtc->dev; | 312 | struct drm_device *drm = crtc->dev; |
| 313 | struct drm_plane *plane; | 313 | struct drm_plane *plane; |
| 314 | 314 | ||
| 315 | list_for_each_entry(plane, &drm->mode_config.plane_list, head) { | 315 | drm_for_each_legacy_plane(plane, &drm->mode_config.plane_list) { |
| 316 | if (plane->crtc == crtc) { | 316 | if (plane->crtc == crtc) { |
| 317 | tegra_plane_disable(plane); | 317 | tegra_plane_disable(plane); |
| 318 | plane->crtc = NULL; | 318 | plane->crtc = NULL; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 931490b9cfed..87df0b3674fd 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
| @@ -1214,14 +1214,36 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, | |||
| 1214 | SVGA3dCmdSurfaceDMA dma; | 1214 | SVGA3dCmdSurfaceDMA dma; |
| 1215 | } *cmd; | 1215 | } *cmd; |
| 1216 | int ret; | 1216 | int ret; |
| 1217 | SVGA3dCmdSurfaceDMASuffix *suffix; | ||
| 1218 | uint32_t bo_size; | ||
| 1217 | 1219 | ||
| 1218 | cmd = container_of(header, struct vmw_dma_cmd, header); | 1220 | cmd = container_of(header, struct vmw_dma_cmd, header); |
| 1221 | suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma + | ||
| 1222 | header->size - sizeof(*suffix)); | ||
| 1223 | |||
| 1224 | /* Make sure device and verifier stays in sync. */ | ||
| 1225 | if (unlikely(suffix->suffixSize != sizeof(*suffix))) { | ||
| 1226 | DRM_ERROR("Invalid DMA suffix size.\n"); | ||
| 1227 | return -EINVAL; | ||
| 1228 | } | ||
| 1229 | |||
| 1219 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, | 1230 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, |
| 1220 | &cmd->dma.guest.ptr, | 1231 | &cmd->dma.guest.ptr, |
| 1221 | &vmw_bo); | 1232 | &vmw_bo); |
| 1222 | if (unlikely(ret != 0)) | 1233 | if (unlikely(ret != 0)) |
| 1223 | return ret; | 1234 | return ret; |
| 1224 | 1235 | ||
| 1236 | /* Make sure DMA doesn't cross BO boundaries. */ | ||
| 1237 | bo_size = vmw_bo->base.num_pages * PAGE_SIZE; | ||
| 1238 | if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) { | ||
| 1239 | DRM_ERROR("Invalid DMA offset.\n"); | ||
| 1240 | return -EINVAL; | ||
| 1241 | } | ||
| 1242 | |||
| 1243 | bo_size -= cmd->dma.guest.ptr.offset; | ||
| 1244 | if (unlikely(suffix->maximumOffset > bo_size)) | ||
| 1245 | suffix->maximumOffset = bo_size; | ||
| 1246 | |||
| 1225 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 1247 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
| 1226 | user_surface_converter, &cmd->dma.host.sid, | 1248 | user_surface_converter, &cmd->dma.host.sid, |
| 1227 | NULL); | 1249 | NULL); |
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index 6d02e3b06375..d76f0b70c6e0 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c | |||
| @@ -365,12 +365,12 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) | |||
| 365 | if (cpu_has_tjmax(c)) | 365 | if (cpu_has_tjmax(c)) |
| 366 | dev_warn(dev, "Unable to read TjMax from CPU %u\n", id); | 366 | dev_warn(dev, "Unable to read TjMax from CPU %u\n", id); |
| 367 | } else { | 367 | } else { |
| 368 | val = (eax >> 16) & 0x7f; | 368 | val = (eax >> 16) & 0xff; |
| 369 | /* | 369 | /* |
| 370 | * If the TjMax is not plausible, an assumption | 370 | * If the TjMax is not plausible, an assumption |
| 371 | * will be used | 371 | * will be used |
| 372 | */ | 372 | */ |
| 373 | if (val >= 85) { | 373 | if (val) { |
| 374 | dev_dbg(dev, "TjMax is %d degrees C\n", val); | 374 | dev_dbg(dev, "TjMax is %d degrees C\n", val); |
| 375 | return val * 1000; | 375 | return val * 1000; |
| 376 | } | 376 | } |
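The coretemp change widens the TjMax field read: bits 23:16 of the MSR value hold an 8-bit temperature target, so the old 0x7f mask could not represent readings of 128 degrees C and above. A small sketch of the extraction (helper name hypothetical):

#include <linux/types.h>

/* Sketch only: extract the 8-bit TjMax field from bits 23:16. */
static int example_tjmax_mc(u32 eax)
{
        u32 val = (eax >> 16) & 0xff;

        return val ? val * 1000 : -1;   /* -1: caller falls back to a default */
}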
diff --git a/drivers/infiniband/hw/cxgb4/Kconfig b/drivers/infiniband/hw/cxgb4/Kconfig index d4e8983fba53..23f38cf2c5cd 100644 --- a/drivers/infiniband/hw/cxgb4/Kconfig +++ b/drivers/infiniband/hw/cxgb4/Kconfig | |||
| @@ -1,10 +1,10 @@ | |||
| 1 | config INFINIBAND_CXGB4 | 1 | config INFINIBAND_CXGB4 |
| 2 | tristate "Chelsio T4 RDMA Driver" | 2 | tristate "Chelsio T4/T5 RDMA Driver" |
| 3 | depends on CHELSIO_T4 && INET && (IPV6 || IPV6=n) | 3 | depends on CHELSIO_T4 && INET && (IPV6 || IPV6=n) |
| 4 | select GENERIC_ALLOCATOR | 4 | select GENERIC_ALLOCATOR |
| 5 | ---help--- | 5 | ---help--- |
| 6 | This is an iWARP/RDMA driver for the Chelsio T4 1GbE and | 6 | This is an iWARP/RDMA driver for the Chelsio T4 and T5 |
| 7 | 10GbE adapters. | 7 | 1GbE, 10GbE adapters and T5 40GbE adapter. |
| 8 | 8 | ||
| 9 | For general information about Chelsio and our products, visit | 9 | For general information about Chelsio and our products, visit |
| 10 | our website at <http://www.chelsio.com>. | 10 | our website at <http://www.chelsio.com>. |
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 185452abf32c..1f863a96a480 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c | |||
| @@ -587,6 +587,10 @@ static int send_connect(struct c4iw_ep *ep) | |||
| 587 | opt2 |= SACK_EN(1); | 587 | opt2 |= SACK_EN(1); |
| 588 | if (wscale && enable_tcp_window_scaling) | 588 | if (wscale && enable_tcp_window_scaling) |
| 589 | opt2 |= WND_SCALE_EN(1); | 589 | opt2 |= WND_SCALE_EN(1); |
| 590 | if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) { | ||
| 591 | opt2 |= T5_OPT_2_VALID; | ||
| 592 | opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE); | ||
| 593 | } | ||
| 590 | t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure); | 594 | t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure); |
| 591 | 595 | ||
| 592 | if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) { | 596 | if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) { |
| @@ -996,7 +1000,7 @@ static void close_complete_upcall(struct c4iw_ep *ep, int status) | |||
| 996 | static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) | 1000 | static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) |
| 997 | { | 1001 | { |
| 998 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | 1002 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); |
| 999 | state_set(&ep->com, ABORTING); | 1003 | __state_set(&ep->com, ABORTING); |
| 1000 | set_bit(ABORT_CONN, &ep->com.history); | 1004 | set_bit(ABORT_CONN, &ep->com.history); |
| 1001 | return send_abort(ep, skb, gfp); | 1005 | return send_abort(ep, skb, gfp); |
| 1002 | } | 1006 | } |
| @@ -1154,7 +1158,7 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits) | |||
| 1154 | return credits; | 1158 | return credits; |
| 1155 | } | 1159 | } |
| 1156 | 1160 | ||
| 1157 | static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) | 1161 | static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) |
| 1158 | { | 1162 | { |
| 1159 | struct mpa_message *mpa; | 1163 | struct mpa_message *mpa; |
| 1160 | struct mpa_v2_conn_params *mpa_v2_params; | 1164 | struct mpa_v2_conn_params *mpa_v2_params; |
| @@ -1164,6 +1168,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) | |||
| 1164 | struct c4iw_qp_attributes attrs; | 1168 | struct c4iw_qp_attributes attrs; |
| 1165 | enum c4iw_qp_attr_mask mask; | 1169 | enum c4iw_qp_attr_mask mask; |
| 1166 | int err; | 1170 | int err; |
| 1171 | int disconnect = 0; | ||
| 1167 | 1172 | ||
| 1168 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | 1173 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); |
| 1169 | 1174 | ||
| @@ -1173,7 +1178,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) | |||
| 1173 | * will abort the connection. | 1178 | * will abort the connection. |
| 1174 | */ | 1179 | */ |
| 1175 | if (stop_ep_timer(ep)) | 1180 | if (stop_ep_timer(ep)) |
| 1176 | return; | 1181 | return 0; |
| 1177 | 1182 | ||
| 1178 | /* | 1183 | /* |
| 1179 | * If we get more than the supported amount of private data | 1184 | * If we get more than the supported amount of private data |
| @@ -1195,7 +1200,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) | |||
| 1195 | * if we don't even have the mpa message, then bail. | 1200 | * if we don't even have the mpa message, then bail. |
| 1196 | */ | 1201 | */ |
| 1197 | if (ep->mpa_pkt_len < sizeof(*mpa)) | 1202 | if (ep->mpa_pkt_len < sizeof(*mpa)) |
| 1198 | return; | 1203 | return 0; |
| 1199 | mpa = (struct mpa_message *) ep->mpa_pkt; | 1204 | mpa = (struct mpa_message *) ep->mpa_pkt; |
| 1200 | 1205 | ||
| 1201 | /* Validate MPA header. */ | 1206 | /* Validate MPA header. */ |
| @@ -1235,7 +1240,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) | |||
| 1235 | * We'll continue process when more data arrives. | 1240 | * We'll continue process when more data arrives. |
| 1236 | */ | 1241 | */ |
| 1237 | if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) | 1242 | if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) |
| 1238 | return; | 1243 | return 0; |
| 1239 | 1244 | ||
| 1240 | if (mpa->flags & MPA_REJECT) { | 1245 | if (mpa->flags & MPA_REJECT) { |
| 1241 | err = -ECONNREFUSED; | 1246 | err = -ECONNREFUSED; |
| @@ -1337,9 +1342,11 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) | |||
| 1337 | attrs.layer_etype = LAYER_MPA | DDP_LLP; | 1342 | attrs.layer_etype = LAYER_MPA | DDP_LLP; |
| 1338 | attrs.ecode = MPA_NOMATCH_RTR; | 1343 | attrs.ecode = MPA_NOMATCH_RTR; |
| 1339 | attrs.next_state = C4IW_QP_STATE_TERMINATE; | 1344 | attrs.next_state = C4IW_QP_STATE_TERMINATE; |
| 1345 | attrs.send_term = 1; | ||
| 1340 | err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, | 1346 | err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, |
| 1341 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); | 1347 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); |
| 1342 | err = -ENOMEM; | 1348 | err = -ENOMEM; |
| 1349 | disconnect = 1; | ||
| 1343 | goto out; | 1350 | goto out; |
| 1344 | } | 1351 | } |
| 1345 | 1352 | ||
| @@ -1355,9 +1362,11 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) | |||
| 1355 | attrs.layer_etype = LAYER_MPA | DDP_LLP; | 1362 | attrs.layer_etype = LAYER_MPA | DDP_LLP; |
| 1356 | attrs.ecode = MPA_INSUFF_IRD; | 1363 | attrs.ecode = MPA_INSUFF_IRD; |
| 1357 | attrs.next_state = C4IW_QP_STATE_TERMINATE; | 1364 | attrs.next_state = C4IW_QP_STATE_TERMINATE; |
| 1365 | attrs.send_term = 1; | ||
| 1358 | err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, | 1366 | err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, |
| 1359 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); | 1367 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); |
| 1360 | err = -ENOMEM; | 1368 | err = -ENOMEM; |
| 1369 | disconnect = 1; | ||
| 1361 | goto out; | 1370 | goto out; |
| 1362 | } | 1371 | } |
| 1363 | goto out; | 1372 | goto out; |
| @@ -1366,7 +1375,7 @@ err: | |||
| 1366 | send_abort(ep, skb, GFP_KERNEL); | 1375 | send_abort(ep, skb, GFP_KERNEL); |
| 1367 | out: | 1376 | out: |
| 1368 | connect_reply_upcall(ep, err); | 1377 | connect_reply_upcall(ep, err); |
| 1369 | return; | 1378 | return disconnect; |
| 1370 | } | 1379 | } |
| 1371 | 1380 | ||
| 1372 | static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) | 1381 | static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) |
| @@ -1524,6 +1533,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb) | |||
| 1524 | unsigned int tid = GET_TID(hdr); | 1533 | unsigned int tid = GET_TID(hdr); |
| 1525 | struct tid_info *t = dev->rdev.lldi.tids; | 1534 | struct tid_info *t = dev->rdev.lldi.tids; |
| 1526 | __u8 status = hdr->status; | 1535 | __u8 status = hdr->status; |
| 1536 | int disconnect = 0; | ||
| 1527 | 1537 | ||
| 1528 | ep = lookup_tid(t, tid); | 1538 | ep = lookup_tid(t, tid); |
| 1529 | if (!ep) | 1539 | if (!ep) |
| @@ -1539,7 +1549,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb) | |||
| 1539 | switch (ep->com.state) { | 1549 | switch (ep->com.state) { |
| 1540 | case MPA_REQ_SENT: | 1550 | case MPA_REQ_SENT: |
| 1541 | ep->rcv_seq += dlen; | 1551 | ep->rcv_seq += dlen; |
| 1542 | process_mpa_reply(ep, skb); | 1552 | disconnect = process_mpa_reply(ep, skb); |
| 1543 | break; | 1553 | break; |
| 1544 | case MPA_REQ_WAIT: | 1554 | case MPA_REQ_WAIT: |
| 1545 | ep->rcv_seq += dlen; | 1555 | ep->rcv_seq += dlen; |
| @@ -1555,13 +1565,16 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb) | |||
| 1555 | ep->com.state, ep->hwtid, status); | 1565 | ep->com.state, ep->hwtid, status); |
| 1556 | attrs.next_state = C4IW_QP_STATE_TERMINATE; | 1566 | attrs.next_state = C4IW_QP_STATE_TERMINATE; |
| 1557 | c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, | 1567 | c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, |
| 1558 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); | 1568 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); |
| 1569 | disconnect = 1; | ||
| 1559 | break; | 1570 | break; |
| 1560 | } | 1571 | } |
| 1561 | default: | 1572 | default: |
| 1562 | break; | 1573 | break; |
| 1563 | } | 1574 | } |
| 1564 | mutex_unlock(&ep->com.mutex); | 1575 | mutex_unlock(&ep->com.mutex); |
| 1576 | if (disconnect) | ||
| 1577 | c4iw_ep_disconnect(ep, 0, GFP_KERNEL); | ||
| 1565 | return 0; | 1578 | return 0; |
| 1566 | } | 1579 | } |
| 1567 | 1580 | ||
| @@ -2009,6 +2022,10 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb, | |||
| 2009 | if (tcph->ece && tcph->cwr) | 2022 | if (tcph->ece && tcph->cwr) |
| 2010 | opt2 |= CCTRL_ECN(1); | 2023 | opt2 |= CCTRL_ECN(1); |
| 2011 | } | 2024 | } |
| 2025 | if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) { | ||
| 2026 | opt2 |= T5_OPT_2_VALID; | ||
| 2027 | opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE); | ||
| 2028 | } | ||
| 2012 | 2029 | ||
| 2013 | rpl = cplhdr(skb); | 2030 | rpl = cplhdr(skb); |
| 2014 | INIT_TP_WR(rpl, ep->hwtid); | 2031 | INIT_TP_WR(rpl, ep->hwtid); |
| @@ -3482,9 +3499,9 @@ static void process_timeout(struct c4iw_ep *ep) | |||
| 3482 | __func__, ep, ep->hwtid, ep->com.state); | 3499 | __func__, ep, ep->hwtid, ep->com.state); |
| 3483 | abort = 0; | 3500 | abort = 0; |
| 3484 | } | 3501 | } |
| 3485 | mutex_unlock(&ep->com.mutex); | ||
| 3486 | if (abort) | 3502 | if (abort) |
| 3487 | abort_connection(ep, NULL, GFP_KERNEL); | 3503 | abort_connection(ep, NULL, GFP_KERNEL); |
| 3504 | mutex_unlock(&ep->com.mutex); | ||
| 3488 | c4iw_put_ep(&ep->com); | 3505 | c4iw_put_ep(&ep->com); |
| 3489 | } | 3506 | } |
| 3490 | 3507 | ||
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 7b8c5806a09d..7474b490760a 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h | |||
| @@ -435,6 +435,7 @@ struct c4iw_qp_attributes { | |||
| 435 | u8 ecode; | 435 | u8 ecode; |
| 436 | u16 sq_db_inc; | 436 | u16 sq_db_inc; |
| 437 | u16 rq_db_inc; | 437 | u16 rq_db_inc; |
| 438 | u8 send_term; | ||
| 438 | }; | 439 | }; |
| 439 | 440 | ||
| 440 | struct c4iw_qp { | 441 | struct c4iw_qp { |
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 7b5114cb486f..086f62f5dc9e 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c | |||
| @@ -1388,11 +1388,12 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, | |||
| 1388 | qhp->attr.layer_etype = attrs->layer_etype; | 1388 | qhp->attr.layer_etype = attrs->layer_etype; |
| 1389 | qhp->attr.ecode = attrs->ecode; | 1389 | qhp->attr.ecode = attrs->ecode; |
| 1390 | ep = qhp->ep; | 1390 | ep = qhp->ep; |
| 1391 | disconnect = 1; | 1391 | if (!internal) { |
| 1392 | c4iw_get_ep(&qhp->ep->com); | 1392 | c4iw_get_ep(&qhp->ep->com); |
| 1393 | if (!internal) | ||
| 1394 | terminate = 1; | 1393 | terminate = 1; |
| 1395 | else { | 1394 | disconnect = 1; |
| 1395 | } else { | ||
| 1396 | terminate = qhp->attr.send_term; | ||
| 1396 | ret = rdma_fini(rhp, qhp, ep); | 1397 | ret = rdma_fini(rhp, qhp, ep); |
| 1397 | if (ret) | 1398 | if (ret) |
| 1398 | goto err; | 1399 | goto err; |
| @@ -1776,11 +1777,15 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
| 1776 | /* | 1777 | /* |
| 1777 | * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for | 1778 | * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for |
| 1778 | * ringing the queue db when we're in DB_FULL mode. | 1779 | * ringing the queue db when we're in DB_FULL mode. |
| 1780 | * Only allow this on T4 devices. | ||
| 1779 | */ | 1781 | */ |
| 1780 | attrs.sq_db_inc = attr->sq_psn; | 1782 | attrs.sq_db_inc = attr->sq_psn; |
| 1781 | attrs.rq_db_inc = attr->rq_psn; | 1783 | attrs.rq_db_inc = attr->rq_psn; |
| 1782 | mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0; | 1784 | mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0; |
| 1783 | mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0; | 1785 | mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0; |
| 1786 | if (is_t5(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) && | ||
| 1787 | (mask & (C4IW_QP_ATTR_SQ_DB|C4IW_QP_ATTR_RQ_DB))) | ||
| 1788 | return -EINVAL; | ||
| 1784 | 1789 | ||
| 1785 | return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0); | 1790 | return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0); |
| 1786 | } | 1791 | } |
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h index dc193c292671..6121ca08fe58 100644 --- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h +++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h | |||
| @@ -836,4 +836,18 @@ struct ulptx_idata { | |||
| 836 | #define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE) | 836 | #define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE) |
| 837 | #define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U) | 837 | #define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U) |
| 838 | 838 | ||
| 839 | enum { /* TCP congestion control algorithms */ | ||
| 840 | CONG_ALG_RENO, | ||
| 841 | CONG_ALG_TAHOE, | ||
| 842 | CONG_ALG_NEWRENO, | ||
| 843 | CONG_ALG_HIGHSPEED | ||
| 844 | }; | ||
| 845 | |||
| 846 | #define S_CONG_CNTRL 14 | ||
| 847 | #define M_CONG_CNTRL 0x3 | ||
| 848 | #define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL) | ||
| 849 | #define G_CONG_CNTRL(x) (((x) >> S_CONG_CNTRL) & M_CONG_CNTRL) | ||
| 850 | |||
| 851 | #define T5_OPT_2_VALID (1 << 31) | ||
| 852 | |||
| 839 | #endif /* _T4FW_RI_API_H_ */ | 853 | #endif /* _T4FW_RI_API_H_ */ |
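The new S_/M_/V_/G_ macros follow the usual Chelsio field convention: shift, mask, build and extract a bit-field. A sketch of how they compose the opt2 word used in cm.c above (surrounding option bits omitted, helper name hypothetical):

#include <linux/types.h>
/* assumes the definitions added to t4fw_ri_api.h above */

static u32 example_build_opt2(bool is_t5)
{
        u32 opt2 = 0;

        if (is_t5) {
                opt2 |= T5_OPT_2_VALID;                 /* bit 31 */
                opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);   /* bits 15:14 */
        }
        /* G_CONG_CNTRL(opt2) would now read back CONG_ALG_TAHOE */
        return opt2;
}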
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c index 41be897df8d5..3899ba7821c5 100644 --- a/drivers/irqchip/irq-armada-370-xp.c +++ b/drivers/irqchip/irq-armada-370-xp.c | |||
| @@ -41,6 +41,7 @@ | |||
| 41 | #define ARMADA_370_XP_INT_SET_ENABLE_OFFS (0x30) | 41 | #define ARMADA_370_XP_INT_SET_ENABLE_OFFS (0x30) |
| 42 | #define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34) | 42 | #define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34) |
| 43 | #define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4) | 43 | #define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4) |
| 44 | #define ARMADA_370_XP_INT_SOURCE_CPU_MASK 0xF | ||
| 44 | 45 | ||
| 45 | #define ARMADA_370_XP_CPU_INTACK_OFFS (0x44) | 46 | #define ARMADA_370_XP_CPU_INTACK_OFFS (0x44) |
| 46 | #define ARMADA_375_PPI_CAUSE (0x10) | 47 | #define ARMADA_375_PPI_CAUSE (0x10) |
| @@ -132,8 +133,7 @@ static int armada_370_xp_setup_msi_irq(struct msi_chip *chip, | |||
| 132 | struct msi_desc *desc) | 133 | struct msi_desc *desc) |
| 133 | { | 134 | { |
| 134 | struct msi_msg msg; | 135 | struct msi_msg msg; |
| 135 | irq_hw_number_t hwirq; | 136 | int virq, hwirq; |
| 136 | int virq; | ||
| 137 | 137 | ||
| 138 | hwirq = armada_370_xp_alloc_msi(); | 138 | hwirq = armada_370_xp_alloc_msi(); |
| 139 | if (hwirq < 0) | 139 | if (hwirq < 0) |
| @@ -159,8 +159,19 @@ static void armada_370_xp_teardown_msi_irq(struct msi_chip *chip, | |||
| 159 | unsigned int irq) | 159 | unsigned int irq) |
| 160 | { | 160 | { |
| 161 | struct irq_data *d = irq_get_irq_data(irq); | 161 | struct irq_data *d = irq_get_irq_data(irq); |
| 162 | unsigned long hwirq = d->hwirq; | ||
| 163 | |||
| 162 | irq_dispose_mapping(irq); | 164 | irq_dispose_mapping(irq); |
| 163 | armada_370_xp_free_msi(d->hwirq); | 165 | armada_370_xp_free_msi(hwirq); |
| 166 | } | ||
| 167 | |||
| 168 | static int armada_370_xp_check_msi_device(struct msi_chip *chip, struct pci_dev *dev, | ||
| 169 | int nvec, int type) | ||
| 170 | { | ||
| 171 | /* We support MSI, but not MSI-X */ | ||
| 172 | if (type == PCI_CAP_ID_MSI) | ||
| 173 | return 0; | ||
| 174 | return -EINVAL; | ||
| 164 | } | 175 | } |
| 165 | 176 | ||
| 166 | static struct irq_chip armada_370_xp_msi_irq_chip = { | 177 | static struct irq_chip armada_370_xp_msi_irq_chip = { |
| @@ -201,6 +212,7 @@ static int armada_370_xp_msi_init(struct device_node *node, | |||
| 201 | 212 | ||
| 202 | msi_chip->setup_irq = armada_370_xp_setup_msi_irq; | 213 | msi_chip->setup_irq = armada_370_xp_setup_msi_irq; |
| 203 | msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq; | 214 | msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq; |
| 215 | msi_chip->check_device = armada_370_xp_check_msi_device; | ||
| 204 | msi_chip->of_node = node; | 216 | msi_chip->of_node = node; |
| 205 | 217 | ||
| 206 | armada_370_xp_msi_domain = | 218 | armada_370_xp_msi_domain = |
| @@ -244,35 +256,18 @@ static DEFINE_RAW_SPINLOCK(irq_controller_lock); | |||
| 244 | static int armada_xp_set_affinity(struct irq_data *d, | 256 | static int armada_xp_set_affinity(struct irq_data *d, |
| 245 | const struct cpumask *mask_val, bool force) | 257 | const struct cpumask *mask_val, bool force) |
| 246 | { | 258 | { |
| 247 | unsigned long reg; | ||
| 248 | unsigned long new_mask = 0; | ||
| 249 | unsigned long online_mask = 0; | ||
| 250 | unsigned long count = 0; | ||
| 251 | irq_hw_number_t hwirq = irqd_to_hwirq(d); | 259 | irq_hw_number_t hwirq = irqd_to_hwirq(d); |
| 260 | unsigned long reg, mask; | ||
| 252 | int cpu; | 261 | int cpu; |
| 253 | 262 | ||
| 254 | for_each_cpu(cpu, mask_val) { | 263 | /* Select a single core from the affinity mask which is online */ |
| 255 | new_mask |= 1 << cpu_logical_map(cpu); | 264 | cpu = cpumask_any_and(mask_val, cpu_online_mask); |
| 256 | count++; | 265 | mask = 1UL << cpu_logical_map(cpu); |
| 257 | } | ||
| 258 | |||
| 259 | /* | ||
| 260 | * Forbid mutlicore interrupt affinity | ||
| 261 | * This is required since the MPIC HW doesn't limit | ||
| 262 | * several CPUs from acknowledging the same interrupt. | ||
| 263 | */ | ||
| 264 | if (count > 1) | ||
| 265 | return -EINVAL; | ||
| 266 | |||
| 267 | for_each_cpu(cpu, cpu_online_mask) | ||
| 268 | online_mask |= 1 << cpu_logical_map(cpu); | ||
| 269 | 266 | ||
| 270 | raw_spin_lock(&irq_controller_lock); | 267 | raw_spin_lock(&irq_controller_lock); |
| 271 | |||
| 272 | reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq)); | 268 | reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq)); |
| 273 | reg = (reg & (~online_mask)) | new_mask; | 269 | reg = (reg & (~ARMADA_370_XP_INT_SOURCE_CPU_MASK)) | mask; |
| 274 | writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq)); | 270 | writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq)); |
| 275 | |||
| 276 | raw_spin_unlock(&irq_controller_lock); | 271 | raw_spin_unlock(&irq_controller_lock); |
| 277 | 272 | ||
| 278 | return 0; | 273 | return 0; |
| @@ -494,15 +489,6 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node, | |||
| 494 | 489 | ||
| 495 | #ifdef CONFIG_SMP | 490 | #ifdef CONFIG_SMP |
| 496 | armada_xp_mpic_smp_cpu_init(); | 491 | armada_xp_mpic_smp_cpu_init(); |
| 497 | |||
| 498 | /* | ||
| 499 | * Set the default affinity from all CPUs to the boot cpu. | ||
| 500 | * This is required since the MPIC doesn't limit several CPUs | ||
| 501 | * from acknowledging the same interrupt. | ||
| 502 | */ | ||
| 503 | cpumask_clear(irq_default_affinity); | ||
| 504 | cpumask_set_cpu(smp_processor_id(), irq_default_affinity); | ||
| 505 | |||
| 506 | #endif | 492 | #endif |
| 507 | 493 | ||
| 508 | armada_370_xp_msi_init(node, main_int_res.start); | 494 | armada_370_xp_msi_init(node, main_int_res.start); |
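The rewritten armada_xp_set_affinity() routes each interrupt to a single online CPU instead of rejecting multi-CPU masks. A sketch of the selection step (helper name hypothetical; callers are assumed to pass a mask that intersects the online CPUs):

#include <linux/cpumask.h>
#include <asm/smp_plat.h>    /* cpu_logical_map() on ARM */

/* Sketch only: pick one online CPU from the requested mask and turn it
 * into the routing bit written to INT_SOURCE_CTL.
 */
static unsigned long example_route_bit(const struct cpumask *mask_val)
{
        int cpu = cpumask_any_and(mask_val, cpu_online_mask);

        return 1UL << cpu_logical_map(cpu);
}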
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c index fc817d28d1fe..3d15d16a7088 100644 --- a/drivers/irqchip/irq-crossbar.c +++ b/drivers/irqchip/irq-crossbar.c | |||
| @@ -107,7 +107,7 @@ static int __init crossbar_of_init(struct device_node *node) | |||
| 107 | int i, size, max, reserved = 0, entry; | 107 | int i, size, max, reserved = 0, entry; |
| 108 | const __be32 *irqsr; | 108 | const __be32 *irqsr; |
| 109 | 109 | ||
| 110 | cb = kzalloc(sizeof(struct cb_device *), GFP_KERNEL); | 110 | cb = kzalloc(sizeof(*cb), GFP_KERNEL); |
| 111 | 111 | ||
| 112 | if (!cb) | 112 | if (!cb) |
| 113 | return -ENOMEM; | 113 | return -ENOMEM; |
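The crossbar fix is the classic allocation-size bug: sizeof(struct cb_device *) is only the size of a pointer, not of the structure. sizeof(*ptr) keeps the size tied to whatever the pointer points at. A minimal sketch with placeholder types:

#include <linux/slab.h>

struct example_dev {
        void *base;
        int   irq;
};

/* Sketch only: size the allocation from the object, not the pointer. */
static struct example_dev *example_alloc(void)
{
        struct example_dev *dev;

        dev = kzalloc(sizeof(*dev), GFP_KERNEL);   /* full struct, zeroed */
        return dev;                                /* NULL if out of memory */
}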
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 1bf4a71919ec..9380be7b1895 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c | |||
| @@ -2488,6 +2488,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio) | |||
| 2488 | 2488 | ||
| 2489 | } else { | 2489 | } else { |
| 2490 | inc_hit_counter(cache, bio); | 2490 | inc_hit_counter(cache, bio); |
| 2491 | pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); | ||
| 2491 | 2492 | ||
| 2492 | if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) && | 2493 | if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) && |
| 2493 | !is_dirty(cache, lookup_result.cblock)) | 2494 | !is_dirty(cache, lookup_result.cblock)) |
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 53728be84dee..13abade76ad9 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c | |||
| @@ -232,6 +232,13 @@ struct thin_c { | |||
| 232 | struct bio_list deferred_bio_list; | 232 | struct bio_list deferred_bio_list; |
| 233 | struct bio_list retry_on_resume_list; | 233 | struct bio_list retry_on_resume_list; |
| 234 | struct rb_root sort_bio_list; /* sorted list of deferred bios */ | 234 | struct rb_root sort_bio_list; /* sorted list of deferred bios */ |
| 235 | |||
| 236 | /* | ||
| 237 | * Ensures the thin is not destroyed until the worker has finished | ||
| 238 | * iterating the active_thins list. | ||
| 239 | */ | ||
| 240 | atomic_t refcount; | ||
| 241 | struct completion can_destroy; | ||
| 235 | }; | 242 | }; |
| 236 | 243 | ||
| 237 | /*----------------------------------------------------------------*/ | 244 | /*----------------------------------------------------------------*/ |
| @@ -1486,6 +1493,45 @@ static void process_thin_deferred_bios(struct thin_c *tc) | |||
| 1486 | blk_finish_plug(&plug); | 1493 | blk_finish_plug(&plug); |
| 1487 | } | 1494 | } |
| 1488 | 1495 | ||
| 1496 | static void thin_get(struct thin_c *tc); | ||
| 1497 | static void thin_put(struct thin_c *tc); | ||
| 1498 | |||
| 1499 | /* | ||
| 1500 | * We can't hold rcu_read_lock() around code that can block. So we | ||
| 1501 | * find a thin with the rcu lock held; bump a refcount; then drop | ||
| 1502 | * the lock. | ||
| 1503 | */ | ||
| 1504 | static struct thin_c *get_first_thin(struct pool *pool) | ||
| 1505 | { | ||
| 1506 | struct thin_c *tc = NULL; | ||
| 1507 | |||
| 1508 | rcu_read_lock(); | ||
| 1509 | if (!list_empty(&pool->active_thins)) { | ||
| 1510 | tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list); | ||
| 1511 | thin_get(tc); | ||
| 1512 | } | ||
| 1513 | rcu_read_unlock(); | ||
| 1514 | |||
| 1515 | return tc; | ||
| 1516 | } | ||
| 1517 | |||
| 1518 | static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc) | ||
| 1519 | { | ||
| 1520 | struct thin_c *old_tc = tc; | ||
| 1521 | |||
| 1522 | rcu_read_lock(); | ||
| 1523 | list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) { | ||
| 1524 | thin_get(tc); | ||
| 1525 | thin_put(old_tc); | ||
| 1526 | rcu_read_unlock(); | ||
| 1527 | return tc; | ||
| 1528 | } | ||
| 1529 | thin_put(old_tc); | ||
| 1530 | rcu_read_unlock(); | ||
| 1531 | |||
| 1532 | return NULL; | ||
| 1533 | } | ||
| 1534 | |||
| 1489 | static void process_deferred_bios(struct pool *pool) | 1535 | static void process_deferred_bios(struct pool *pool) |
| 1490 | { | 1536 | { |
| 1491 | unsigned long flags; | 1537 | unsigned long flags; |
| @@ -1493,10 +1539,11 @@ static void process_deferred_bios(struct pool *pool) | |||
| 1493 | struct bio_list bios; | 1539 | struct bio_list bios; |
| 1494 | struct thin_c *tc; | 1540 | struct thin_c *tc; |
| 1495 | 1541 | ||
| 1496 | rcu_read_lock(); | 1542 | tc = get_first_thin(pool); |
| 1497 | list_for_each_entry_rcu(tc, &pool->active_thins, list) | 1543 | while (tc) { |
| 1498 | process_thin_deferred_bios(tc); | 1544 | process_thin_deferred_bios(tc); |
| 1499 | rcu_read_unlock(); | 1545 | tc = get_next_thin(pool, tc); |
| 1546 | } | ||
| 1500 | 1547 | ||
| 1501 | /* | 1548 | /* |
| 1502 | * If there are any deferred flush bios, we must commit | 1549 | * If there are any deferred flush bios, we must commit |
| @@ -1578,7 +1625,7 @@ static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *)) | |||
| 1578 | { | 1625 | { |
| 1579 | struct noflush_work w; | 1626 | struct noflush_work w; |
| 1580 | 1627 | ||
| 1581 | INIT_WORK(&w.worker, fn); | 1628 | INIT_WORK_ONSTACK(&w.worker, fn); |
| 1582 | w.tc = tc; | 1629 | w.tc = tc; |
| 1583 | atomic_set(&w.complete, 0); | 1630 | atomic_set(&w.complete, 0); |
| 1584 | init_waitqueue_head(&w.wait); | 1631 | init_waitqueue_head(&w.wait); |
| @@ -3061,11 +3108,25 @@ static struct target_type pool_target = { | |||
| 3061 | /*---------------------------------------------------------------- | 3108 | /*---------------------------------------------------------------- |
| 3062 | * Thin target methods | 3109 | * Thin target methods |
| 3063 | *--------------------------------------------------------------*/ | 3110 | *--------------------------------------------------------------*/ |
| 3111 | static void thin_get(struct thin_c *tc) | ||
| 3112 | { | ||
| 3113 | atomic_inc(&tc->refcount); | ||
| 3114 | } | ||
| 3115 | |||
| 3116 | static void thin_put(struct thin_c *tc) | ||
| 3117 | { | ||
| 3118 | if (atomic_dec_and_test(&tc->refcount)) | ||
| 3119 | complete(&tc->can_destroy); | ||
| 3120 | } | ||
| 3121 | |||
| 3064 | static void thin_dtr(struct dm_target *ti) | 3122 | static void thin_dtr(struct dm_target *ti) |
| 3065 | { | 3123 | { |
| 3066 | struct thin_c *tc = ti->private; | 3124 | struct thin_c *tc = ti->private; |
| 3067 | unsigned long flags; | 3125 | unsigned long flags; |
| 3068 | 3126 | ||
| 3127 | thin_put(tc); | ||
| 3128 | wait_for_completion(&tc->can_destroy); | ||
| 3129 | |||
| 3069 | spin_lock_irqsave(&tc->pool->lock, flags); | 3130 | spin_lock_irqsave(&tc->pool->lock, flags); |
| 3070 | list_del_rcu(&tc->list); | 3131 | list_del_rcu(&tc->list); |
| 3071 | spin_unlock_irqrestore(&tc->pool->lock, flags); | 3132 | spin_unlock_irqrestore(&tc->pool->lock, flags); |
| @@ -3101,6 +3162,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv) | |||
| 3101 | struct thin_c *tc; | 3162 | struct thin_c *tc; |
| 3102 | struct dm_dev *pool_dev, *origin_dev; | 3163 | struct dm_dev *pool_dev, *origin_dev; |
| 3103 | struct mapped_device *pool_md; | 3164 | struct mapped_device *pool_md; |
| 3165 | unsigned long flags; | ||
| 3104 | 3166 | ||
| 3105 | mutex_lock(&dm_thin_pool_table.mutex); | 3167 | mutex_lock(&dm_thin_pool_table.mutex); |
| 3106 | 3168 | ||
| @@ -3191,9 +3253,12 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv) | |||
| 3191 | 3253 | ||
| 3192 | mutex_unlock(&dm_thin_pool_table.mutex); | 3254 | mutex_unlock(&dm_thin_pool_table.mutex); |
| 3193 | 3255 | ||
| 3194 | spin_lock(&tc->pool->lock); | 3256 | atomic_set(&tc->refcount, 1); |
| 3257 | init_completion(&tc->can_destroy); | ||
| 3258 | |||
| 3259 | spin_lock_irqsave(&tc->pool->lock, flags); | ||
| 3195 | list_add_tail_rcu(&tc->list, &tc->pool->active_thins); | 3260 | list_add_tail_rcu(&tc->list, &tc->pool->active_thins); |
| 3196 | spin_unlock(&tc->pool->lock); | 3261 | spin_unlock_irqrestore(&tc->pool->lock, flags); |
| 3197 | /* | 3262 | /* |
| 3198 | * This synchronize_rcu() call is needed here otherwise we risk a | 3263 | * This synchronize_rcu() call is needed here otherwise we risk a |
| 3199 | * wake_worker() call finding no bios to process (because the newly | 3264 | * wake_worker() call finding no bios to process (because the newly |
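The dm-thin hunks above combine two patterns: walking an RCU-protected list while taking a per-element reference so the rcu read lock can be dropped around blocking work (get_first_thin()/get_next_thin()), and a refcount-plus-completion pair so thin_dtr() cannot tear the object down while the worker still holds it. A condensed sketch of that second pattern, with invented names rather than the driver's:

#include <linux/atomic.h>
#include <linux/completion.h>

struct demo_obj {
	atomic_t refcount;
	struct completion can_destroy;
};

static void demo_obj_init(struct demo_obj *o)
{
	atomic_set(&o->refcount, 1);		/* creator holds one reference */
	init_completion(&o->can_destroy);
}

static void demo_get(struct demo_obj *o)
{
	atomic_inc(&o->refcount);
}

static void demo_put(struct demo_obj *o)
{
	/* The last put signals whoever is blocked in the destructor. */
	if (atomic_dec_and_test(&o->refcount))
		complete(&o->can_destroy);
}

static void demo_destroy(struct demo_obj *o)
{
	demo_put(o);				/* drop the creator's reference */
	wait_for_completion(&o->can_destroy);	/* wait for the worker's puts */
	/* now safe to unlink and free */
}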
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c index 796007a5e0e1..7a7bab8947ae 100644 --- a/drivers/md/dm-verity.c +++ b/drivers/md/dm-verity.c | |||
| @@ -330,15 +330,17 @@ test_block_hash: | |||
| 330 | return r; | 330 | return r; |
| 331 | } | 331 | } |
| 332 | } | 332 | } |
| 333 | |||
| 334 | todo = 1 << v->data_dev_block_bits; | 333 | todo = 1 << v->data_dev_block_bits; |
| 335 | while (io->iter.bi_size) { | 334 | do { |
| 336 | u8 *page; | 335 | u8 *page; |
| 336 | unsigned len; | ||
| 337 | struct bio_vec bv = bio_iter_iovec(bio, io->iter); | 337 | struct bio_vec bv = bio_iter_iovec(bio, io->iter); |
| 338 | 338 | ||
| 339 | page = kmap_atomic(bv.bv_page); | 339 | page = kmap_atomic(bv.bv_page); |
| 340 | r = crypto_shash_update(desc, page + bv.bv_offset, | 340 | len = bv.bv_len; |
| 341 | bv.bv_len); | 341 | if (likely(len >= todo)) |
| 342 | len = todo; | ||
| 343 | r = crypto_shash_update(desc, page + bv.bv_offset, len); | ||
| 342 | kunmap_atomic(page); | 344 | kunmap_atomic(page); |
| 343 | 345 | ||
| 344 | if (r < 0) { | 346 | if (r < 0) { |
| @@ -346,8 +348,9 @@ test_block_hash: | |||
| 346 | return r; | 348 | return r; |
| 347 | } | 349 | } |
| 348 | 350 | ||
| 349 | bio_advance_iter(bio, &io->iter, bv.bv_len); | 351 | bio_advance_iter(bio, &io->iter, len); |
| 350 | } | 352 | todo -= len; |
| 353 | } while (todo); | ||
| 351 | 354 | ||
| 352 | if (!v->version) { | 355 | if (!v->version) { |
| 353 | r = crypto_shash_update(desc, v->salt, v->salt_size); | 356 | r = crypto_shash_update(desc, v->salt, v->salt_size); |
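The dm-verity change replaces the per-segment loop with one that clamps each hash update to the bytes still owed to the current data block (todo), so hashing stops exactly at the block boundary even when a bio_vec spans two blocks. The shape of that clamped loop, reduced to plain C with illustrative names:

#include <stddef.h>

struct seg { const unsigned char *buf; size_t len; };

static void hash_one_block(const struct seg *segs, size_t nsegs,
			   size_t block_size,
			   void (*update)(const unsigned char *, size_t))
{
	size_t todo = block_size;
	size_t i = 0, off = 0;

	do {
		size_t len = segs[i].len - off;

		if (len >= todo)
			len = todo;		/* clamp at the block boundary */

		update(segs[i].buf + off, len);

		off += len;
		if (off == segs[i].len) {	/* move on to the next segment */
			i++;
			off = 0;
		}
		todo -= len;
	} while (todo && i < nsegs);
}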
diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 9bcf2cf19357..5aeb89411350 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c | |||
| @@ -364,7 +364,7 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r) | |||
| 364 | 364 | ||
| 365 | memset(r, 0, sizeof(*r)); | 365 | memset(r, 0, sizeof(*r)); |
| 366 | /* | 366 | /* |
| 367 | * Get optional "interrupts-names" property to add a name | 367 | * Get optional "interrupt-names" property to add a name |
| 368 | * to the resource. | 368 | * to the resource. |
| 369 | */ | 369 | */ |
| 370 | of_property_read_string_index(dev, "interrupt-names", index, | 370 | of_property_read_string_index(dev, "interrupt-names", index, |
| @@ -380,6 +380,32 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r) | |||
| 380 | EXPORT_SYMBOL_GPL(of_irq_to_resource); | 380 | EXPORT_SYMBOL_GPL(of_irq_to_resource); |
| 381 | 381 | ||
| 382 | /** | 382 | /** |
| 383 | * of_irq_get - Decode a node's IRQ and return it as a Linux irq number | ||
| 384 | * @dev: pointer to device tree node | ||
| 385 | * @index: zero-based index of the irq | ||
| 386 | * | ||
| 387 | * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain | ||
| 388 | * is not yet created. | ||
| 389 | * | ||
| 390 | */ | ||
| 391 | int of_irq_get(struct device_node *dev, int index) | ||
| 392 | { | ||
| 393 | int rc; | ||
| 394 | struct of_phandle_args oirq; | ||
| 395 | struct irq_domain *domain; | ||
| 396 | |||
| 397 | rc = of_irq_parse_one(dev, index, &oirq); | ||
| 398 | if (rc) | ||
| 399 | return rc; | ||
| 400 | |||
| 401 | domain = irq_find_host(oirq.np); | ||
| 402 | if (!domain) | ||
| 403 | return -EPROBE_DEFER; | ||
| 404 | |||
| 405 | return irq_create_of_mapping(&oirq); | ||
| 406 | } | ||
| 407 | |||
| 408 | /** | ||
| 383 | * of_irq_count - Count the number of IRQs a node uses | 409 | * of_irq_count - Count the number of IRQs a node uses |
| 384 | * @dev: pointer to device tree node | 410 | * @dev: pointer to device tree node |
| 385 | */ | 411 | */ |
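The new of_irq_get() differs from irq_of_parse_and_map() in one important way: when the interrupt specifier parses but the controller's irq domain has not been registered yet, it returns -EPROBE_DEFER instead of 0, and the selftest below exercises exactly that through platform_get_irq(). A hedged sketch of how a platform driver consumes the result; the driver and handler names are invented:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t demo_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int demo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* -EPROBE_DEFER propagates; probe is retried later */

	return devm_request_irq(&pdev->dev, irq, demo_handler, 0,
				dev_name(&pdev->dev), pdev);
}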
diff --git a/drivers/of/platform.c b/drivers/of/platform.c index 404d1daebefa..bd47fbc53dc9 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c | |||
| @@ -168,7 +168,9 @@ struct platform_device *of_device_alloc(struct device_node *np, | |||
| 168 | rc = of_address_to_resource(np, i, res); | 168 | rc = of_address_to_resource(np, i, res); |
| 169 | WARN_ON(rc); | 169 | WARN_ON(rc); |
| 170 | } | 170 | } |
| 171 | WARN_ON(of_irq_to_resource_table(np, res, num_irq) != num_irq); | 171 | if (of_irq_to_resource_table(np, res, num_irq) != num_irq) |
| 172 | pr_debug("not all legacy IRQ resources mapped for %s\n", | ||
| 173 | np->name); | ||
| 172 | } | 174 | } |
| 173 | 175 | ||
| 174 | dev->dev.of_node = of_node_get(np); | 176 | dev->dev.of_node = of_node_get(np); |
diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c index ae4450070503..fe70b86bcffb 100644 --- a/drivers/of/selftest.c +++ b/drivers/of/selftest.c | |||
| @@ -10,6 +10,7 @@ | |||
| 10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
| 11 | #include <linux/of.h> | 11 | #include <linux/of.h> |
| 12 | #include <linux/of_irq.h> | 12 | #include <linux/of_irq.h> |
| 13 | #include <linux/of_platform.h> | ||
| 13 | #include <linux/list.h> | 14 | #include <linux/list.h> |
| 14 | #include <linux/mutex.h> | 15 | #include <linux/mutex.h> |
| 15 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
| @@ -427,6 +428,36 @@ static void __init of_selftest_match_node(void) | |||
| 427 | } | 428 | } |
| 428 | } | 429 | } |
| 429 | 430 | ||
| 431 | static void __init of_selftest_platform_populate(void) | ||
| 432 | { | ||
| 433 | int irq; | ||
| 434 | struct device_node *np; | ||
| 435 | struct platform_device *pdev; | ||
| 436 | |||
| 437 | np = of_find_node_by_path("/testcase-data"); | ||
| 438 | of_platform_populate(np, of_default_bus_match_table, NULL, NULL); | ||
| 439 | |||
| 440 | /* Test that a missing irq domain returns -EPROBE_DEFER */ | ||
| 441 | np = of_find_node_by_path("/testcase-data/testcase-device1"); | ||
| 442 | pdev = of_find_device_by_node(np); | ||
| 443 | if (!pdev) | ||
| 444 | selftest(0, "device 1 creation failed\n"); | ||
| 445 | irq = platform_get_irq(pdev, 0); | ||
| 446 | if (irq != -EPROBE_DEFER) | ||
| 447 | selftest(0, "device deferred probe failed - %d\n", irq); | ||
| 448 | |||
| 449 | /* Test that a parsing failure does not return -EPROBE_DEFER */ | ||
| 450 | np = of_find_node_by_path("/testcase-data/testcase-device2"); | ||
| 451 | pdev = of_find_device_by_node(np); | ||
| 452 | if (!pdev) | ||
| 453 | selftest(0, "device 2 creation failed\n"); | ||
| 454 | irq = platform_get_irq(pdev, 0); | ||
| 455 | if (irq >= 0 || irq == -EPROBE_DEFER) | ||
| 456 | selftest(0, "device parsing error failed - %d\n", irq); | ||
| 457 | |||
| 458 | selftest(1, "passed"); | ||
| 459 | } | ||
| 460 | |||
| 430 | static int __init of_selftest(void) | 461 | static int __init of_selftest(void) |
| 431 | { | 462 | { |
| 432 | struct device_node *np; | 463 | struct device_node *np; |
| @@ -445,6 +476,7 @@ static int __init of_selftest(void) | |||
| 445 | of_selftest_parse_interrupts(); | 476 | of_selftest_parse_interrupts(); |
| 446 | of_selftest_parse_interrupts_extended(); | 477 | of_selftest_parse_interrupts_extended(); |
| 447 | of_selftest_match_node(); | 478 | of_selftest_match_node(); |
| 479 | of_selftest_platform_populate(); | ||
| 448 | pr_info("end of selftest - %i passed, %i failed\n", | 480 | pr_info("end of selftest - %i passed, %i failed\n", |
| 449 | selftest_results.passed, selftest_results.failed); | 481 | selftest_results.passed, selftest_results.failed); |
| 450 | return 0; | 482 | return 0; |
diff --git a/drivers/of/testcase-data/tests-interrupts.dtsi b/drivers/of/testcase-data/tests-interrupts.dtsi index c843720bd3e5..da4695f60351 100644 --- a/drivers/of/testcase-data/tests-interrupts.dtsi +++ b/drivers/of/testcase-data/tests-interrupts.dtsi | |||
| @@ -54,5 +54,18 @@ | |||
| 54 | <&test_intmap1 1 2>; | 54 | <&test_intmap1 1 2>; |
| 55 | }; | 55 | }; |
| 56 | }; | 56 | }; |
| 57 | |||
| 58 | testcase-device1 { | ||
| 59 | compatible = "testcase-device"; | ||
| 60 | interrupt-parent = <&test_intc0>; | ||
| 61 | interrupts = <1>; | ||
| 62 | }; | ||
| 63 | |||
| 64 | testcase-device2 { | ||
| 65 | compatible = "testcase-device"; | ||
| 66 | interrupt-parent = <&test_intc2>; | ||
| 67 | interrupts = <1>; /* invalid specifier - too short */ | ||
| 68 | }; | ||
| 57 | }; | 69 | }; |
| 70 | |||
| 58 | }; | 71 | }; |
diff --git a/drivers/pinctrl/pinctrl-as3722.c b/drivers/pinctrl/pinctrl-as3722.c index 92ed4b2e3c07..c862f9c0e9ce 100644 --- a/drivers/pinctrl/pinctrl-as3722.c +++ b/drivers/pinctrl/pinctrl-as3722.c | |||
| @@ -64,7 +64,6 @@ struct as3722_pin_function { | |||
| 64 | }; | 64 | }; |
| 65 | 65 | ||
| 66 | struct as3722_gpio_pin_control { | 66 | struct as3722_gpio_pin_control { |
| 67 | bool enable_gpio_invert; | ||
| 68 | unsigned mode_prop; | 67 | unsigned mode_prop; |
| 69 | int io_function; | 68 | int io_function; |
| 70 | }; | 69 | }; |
| @@ -320,10 +319,8 @@ static int as3722_pinctrl_gpio_set_direction(struct pinctrl_dev *pctldev, | |||
| 320 | return mode; | 319 | return mode; |
| 321 | } | 320 | } |
| 322 | 321 | ||
| 323 | if (as_pci->gpio_control[offset].enable_gpio_invert) | 322 | return as3722_update_bits(as3722, AS3722_GPIOn_CONTROL_REG(offset), |
| 324 | mode |= AS3722_GPIO_INV; | 323 | AS3722_GPIO_MODE_MASK, mode); |
| 325 | |||
| 326 | return as3722_write(as3722, AS3722_GPIOn_CONTROL_REG(offset), mode); | ||
| 327 | } | 324 | } |
| 328 | 325 | ||
| 329 | static const struct pinmux_ops as3722_pinmux_ops = { | 326 | static const struct pinmux_ops as3722_pinmux_ops = { |
| @@ -496,10 +493,18 @@ static void as3722_gpio_set(struct gpio_chip *chip, unsigned offset, | |||
| 496 | { | 493 | { |
| 497 | struct as3722_pctrl_info *as_pci = to_as_pci(chip); | 494 | struct as3722_pctrl_info *as_pci = to_as_pci(chip); |
| 498 | struct as3722 *as3722 = as_pci->as3722; | 495 | struct as3722 *as3722 = as_pci->as3722; |
| 499 | int en_invert = as_pci->gpio_control[offset].enable_gpio_invert; | 496 | int en_invert; |
| 500 | u32 val; | 497 | u32 val; |
| 501 | int ret; | 498 | int ret; |
| 502 | 499 | ||
| 500 | ret = as3722_read(as3722, AS3722_GPIOn_CONTROL_REG(offset), &val); | ||
| 501 | if (ret < 0) { | ||
| 502 | dev_err(as_pci->dev, | ||
| 503 | "GPIO_CONTROL%d_REG read failed: %d\n", offset, ret); | ||
| 504 | return; | ||
| 505 | } | ||
| 506 | en_invert = !!(val & AS3722_GPIO_INV); | ||
| 507 | |||
| 503 | if (value) | 508 | if (value) |
| 504 | val = (en_invert) ? 0 : AS3722_GPIOn_SIGNAL(offset); | 509 | val = (en_invert) ? 0 : AS3722_GPIOn_SIGNAL(offset); |
| 505 | else | 510 | else |
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index 81075f2a1d3f..2960557bfed9 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c | |||
| @@ -810,6 +810,7 @@ static const struct pinconf_ops pcs_pinconf_ops = { | |||
| 810 | static int pcs_add_pin(struct pcs_device *pcs, unsigned offset, | 810 | static int pcs_add_pin(struct pcs_device *pcs, unsigned offset, |
| 811 | unsigned pin_pos) | 811 | unsigned pin_pos) |
| 812 | { | 812 | { |
| 813 | struct pcs_soc_data *pcs_soc = &pcs->socdata; | ||
| 813 | struct pinctrl_pin_desc *pin; | 814 | struct pinctrl_pin_desc *pin; |
| 814 | struct pcs_name *pn; | 815 | struct pcs_name *pn; |
| 815 | int i; | 816 | int i; |
| @@ -821,6 +822,18 @@ static int pcs_add_pin(struct pcs_device *pcs, unsigned offset, | |||
| 821 | return -ENOMEM; | 822 | return -ENOMEM; |
| 822 | } | 823 | } |
| 823 | 824 | ||
| 825 | if (pcs_soc->irq_enable_mask) { | ||
| 826 | unsigned val; | ||
| 827 | |||
| 828 | val = pcs->read(pcs->base + offset); | ||
| 829 | if (val & pcs_soc->irq_enable_mask) { | ||
| 830 | dev_dbg(pcs->dev, "irq enabled at boot for pin at %lx (%x), clearing\n", | ||
| 831 | (unsigned long)pcs->res->start + offset, val); | ||
| 832 | val &= ~pcs_soc->irq_enable_mask; | ||
| 833 | pcs->write(val, pcs->base + offset); | ||
| 834 | } | ||
| 835 | } | ||
| 836 | |||
| 824 | pin = &pcs->pins.pa[i]; | 837 | pin = &pcs->pins.pa[i]; |
| 825 | pn = &pcs->names[i]; | 838 | pn = &pcs->names[i]; |
| 826 | sprintf(pn->name, "%lx.%d", | 839 | sprintf(pn->name, "%lx.%d", |
diff --git a/drivers/pinctrl/pinctrl-tb10x.c b/drivers/pinctrl/pinctrl-tb10x.c index c5e0f6973a3b..26ca6855f478 100644 --- a/drivers/pinctrl/pinctrl-tb10x.c +++ b/drivers/pinctrl/pinctrl-tb10x.c | |||
| @@ -629,9 +629,8 @@ static int tb10x_gpio_request_enable(struct pinctrl_dev *pctl, | |||
| 629 | */ | 629 | */ |
| 630 | for (i = 0; i < state->pinfuncgrpcnt; i++) { | 630 | for (i = 0; i < state->pinfuncgrpcnt; i++) { |
| 631 | const struct tb10x_pinfuncgrp *pfg = &state->pingroups[i]; | 631 | const struct tb10x_pinfuncgrp *pfg = &state->pingroups[i]; |
| 632 | unsigned int port = pfg->port; | ||
| 633 | unsigned int mode = pfg->mode; | 632 | unsigned int mode = pfg->mode; |
| 634 | int j; | 633 | int j, port = pfg->port; |
| 635 | 634 | ||
| 636 | /* | 635 | /* |
| 637 | * Skip pin groups which are always mapped and don't need | 636 | * Skip pin groups which are always mapped and don't need |
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c index 48093719167a..f5cd3f961808 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c | |||
| @@ -4794,8 +4794,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { | |||
| 4794 | FN_MSIOF0_SCK_B, 0, | 4794 | FN_MSIOF0_SCK_B, 0, |
| 4795 | /* IP5_23_21 [3] */ | 4795 | /* IP5_23_21 [3] */ |
| 4796 | FN_WE1_N, FN_IERX, FN_CAN1_RX, FN_VI1_G4, | 4796 | FN_WE1_N, FN_IERX, FN_CAN1_RX, FN_VI1_G4, |
| 4797 | FN_VI1_G4_B, FN_VI2_R6, FN_SCIFA0_CTS_N_B, | 4797 | FN_VI1_G4_B, FN_VI2_R6, FN_SCIFA0_CTS_N_B, FN_IERX_C, |
| 4798 | FN_IERX_C, 0, | ||
| 4799 | /* IP5_20_18 [3] */ | 4798 | /* IP5_20_18 [3] */ |
| 4800 | FN_WE0_N, FN_IECLK, FN_CAN_CLK, | 4799 | FN_WE0_N, FN_IECLK, FN_CAN_CLK, |
| 4801 | FN_VI2_VSYNC_N, FN_SCIFA0_TXD_B, FN_VI2_VSYNC_N_B, 0, 0, | 4800 | FN_VI2_VSYNC_N, FN_SCIFA0_TXD_B, FN_VI2_VSYNC_N_B, 0, 0, |
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c index 5186d70c49d4..7868bf3a0f91 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c | |||
| @@ -5288,7 +5288,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { | |||
| 5288 | /* SEL_SCIF3 [2] */ | 5288 | /* SEL_SCIF3 [2] */ |
| 5289 | FN_SEL_SCIF3_0, FN_SEL_SCIF3_1, FN_SEL_SCIF3_2, FN_SEL_SCIF3_3, | 5289 | FN_SEL_SCIF3_0, FN_SEL_SCIF3_1, FN_SEL_SCIF3_2, FN_SEL_SCIF3_3, |
| 5290 | /* SEL_IEB [2] */ | 5290 | /* SEL_IEB [2] */ |
| 5291 | FN_SEL_IEB_0, FN_SEL_IEB_1, FN_SEL_IEB_2, | 5291 | FN_SEL_IEB_0, FN_SEL_IEB_1, FN_SEL_IEB_2, 0, |
| 5292 | /* SEL_MMC [1] */ | 5292 | /* SEL_MMC [1] */ |
| 5293 | FN_SEL_MMC_0, FN_SEL_MMC_1, | 5293 | FN_SEL_MMC_0, FN_SEL_MMC_1, |
| 5294 | /* SEL_SCIF5 [1] */ | 5294 | /* SEL_SCIF5 [1] */ |
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c index 9f611cbbc294..c31aa07b3ba5 100644 --- a/drivers/pnp/pnpacpi/core.c +++ b/drivers/pnp/pnpacpi/core.c | |||
| @@ -83,8 +83,7 @@ static int pnpacpi_set_resources(struct pnp_dev *dev) | |||
| 83 | { | 83 | { |
| 84 | struct acpi_device *acpi_dev; | 84 | struct acpi_device *acpi_dev; |
| 85 | acpi_handle handle; | 85 | acpi_handle handle; |
| 86 | struct acpi_buffer buffer; | 86 | int ret = 0; |
| 87 | int ret; | ||
| 88 | 87 | ||
| 89 | pnp_dbg(&dev->dev, "set resources\n"); | 88 | pnp_dbg(&dev->dev, "set resources\n"); |
| 90 | 89 | ||
| @@ -97,19 +96,26 @@ static int pnpacpi_set_resources(struct pnp_dev *dev) | |||
| 97 | if (WARN_ON_ONCE(acpi_dev != dev->data)) | 96 | if (WARN_ON_ONCE(acpi_dev != dev->data)) |
| 98 | dev->data = acpi_dev; | 97 | dev->data = acpi_dev; |
| 99 | 98 | ||
| 100 | ret = pnpacpi_build_resource_template(dev, &buffer); | 99 | if (acpi_has_method(handle, METHOD_NAME__SRS)) { |
| 101 | if (ret) | 100 | struct acpi_buffer buffer; |
| 102 | return ret; | 101 | |
| 103 | ret = pnpacpi_encode_resources(dev, &buffer); | 102 | ret = pnpacpi_build_resource_template(dev, &buffer); |
| 104 | if (ret) { | 103 | if (ret) |
| 104 | return ret; | ||
| 105 | |||
| 106 | ret = pnpacpi_encode_resources(dev, &buffer); | ||
| 107 | if (!ret) { | ||
| 108 | acpi_status status; | ||
| 109 | |||
| 110 | status = acpi_set_current_resources(handle, &buffer); | ||
| 111 | if (ACPI_FAILURE(status)) | ||
| 112 | ret = -EIO; | ||
| 113 | } | ||
| 105 | kfree(buffer.pointer); | 114 | kfree(buffer.pointer); |
| 106 | return ret; | ||
| 107 | } | 115 | } |
| 108 | if (ACPI_FAILURE(acpi_set_current_resources(handle, &buffer))) | 116 | if (!ret && acpi_bus_power_manageable(handle)) |
| 109 | ret = -EINVAL; | ||
| 110 | else if (acpi_bus_power_manageable(handle)) | ||
| 111 | ret = acpi_bus_set_power(handle, ACPI_STATE_D0); | 117 | ret = acpi_bus_set_power(handle, ACPI_STATE_D0); |
| 112 | kfree(buffer.pointer); | 118 | |
| 113 | return ret; | 119 | return ret; |
| 114 | } | 120 | } |
| 115 | 121 | ||
| @@ -117,7 +123,7 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev) | |||
| 117 | { | 123 | { |
| 118 | struct acpi_device *acpi_dev; | 124 | struct acpi_device *acpi_dev; |
| 119 | acpi_handle handle; | 125 | acpi_handle handle; |
| 120 | int ret; | 126 | acpi_status status; |
| 121 | 127 | ||
| 122 | dev_dbg(&dev->dev, "disable resources\n"); | 128 | dev_dbg(&dev->dev, "disable resources\n"); |
| 123 | 129 | ||
| @@ -128,13 +134,15 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev) | |||
| 128 | } | 134 | } |
| 129 | 135 | ||
| 130 | /* acpi_unregister_gsi(pnp_irq(dev, 0)); */ | 136 | /* acpi_unregister_gsi(pnp_irq(dev, 0)); */ |
| 131 | ret = 0; | ||
| 132 | if (acpi_bus_power_manageable(handle)) | 137 | if (acpi_bus_power_manageable(handle)) |
| 133 | acpi_bus_set_power(handle, ACPI_STATE_D3_COLD); | 138 | acpi_bus_set_power(handle, ACPI_STATE_D3_COLD); |
| 134 | /* continue even if acpi_bus_set_power() fails */ | 139 | |
| 135 | if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DIS", NULL, NULL))) | 140 | /* continue even if acpi_bus_set_power() fails */ |
| 136 | ret = -ENODEV; | 141 | status = acpi_evaluate_object(handle, "_DIS", NULL, NULL); |
| 137 | return ret; | 142 | if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) |
| 143 | return -ENODEV; | ||
| 144 | |||
| 145 | return 0; | ||
| 138 | } | 146 | } |
| 139 | 147 | ||
| 140 | #ifdef CONFIG_ACPI_SLEEP | 148 | #ifdef CONFIG_ACPI_SLEEP |
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c index 3736bc408adb..ebf0d6710b5a 100644 --- a/drivers/pnp/quirks.c +++ b/drivers/pnp/quirks.c | |||
| @@ -335,7 +335,7 @@ static void quirk_amd_mmconfig_area(struct pnp_dev *dev) | |||
| 335 | } | 335 | } |
| 336 | #endif | 336 | #endif |
| 337 | 337 | ||
| 338 | #ifdef CONFIG_X86 | 338 | #ifdef CONFIG_PCI |
| 339 | /* Device IDs of parts that have 32KB MCH space */ | 339 | /* Device IDs of parts that have 32KB MCH space */ |
| 340 | static const unsigned int mch_quirk_devices[] = { | 340 | static const unsigned int mch_quirk_devices[] = { |
| 341 | 0x0154, /* Ivy Bridge */ | 341 | 0x0154, /* Ivy Bridge */ |
| @@ -440,7 +440,7 @@ static struct pnp_fixup pnp_fixups[] = { | |||
| 440 | #ifdef CONFIG_AMD_NB | 440 | #ifdef CONFIG_AMD_NB |
| 441 | {"PNP0c01", quirk_amd_mmconfig_area}, | 441 | {"PNP0c01", quirk_amd_mmconfig_area}, |
| 442 | #endif | 442 | #endif |
| 443 | #ifdef CONFIG_X86 | 443 | #ifdef CONFIG_PCI |
| 444 | {"PNP0c02", quirk_intel_mch}, | 444 | {"PNP0c02", quirk_intel_mch}, |
| 445 | #endif | 445 | #endif |
| 446 | {""} | 446 | {""} |
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 9f0ea6cb6922..e3bf885f4a6c 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c | |||
| @@ -541,18 +541,27 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area) | |||
| 541 | 541 | ||
| 542 | static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm) | 542 | static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm) |
| 543 | { | 543 | { |
| 544 | do { | 544 | static int ntsm_unsupported; |
| 545 | |||
| 546 | while (true) { | ||
| 545 | memset(sei, 0, sizeof(*sei)); | 547 | memset(sei, 0, sizeof(*sei)); |
| 546 | sei->request.length = 0x0010; | 548 | sei->request.length = 0x0010; |
| 547 | sei->request.code = 0x000e; | 549 | sei->request.code = 0x000e; |
| 548 | sei->ntsm = ntsm; | 550 | if (!ntsm_unsupported) |
| 551 | sei->ntsm = ntsm; | ||
| 549 | 552 | ||
| 550 | if (chsc(sei)) | 553 | if (chsc(sei)) |
| 551 | break; | 554 | break; |
| 552 | 555 | ||
| 553 | if (sei->response.code != 0x0001) { | 556 | if (sei->response.code != 0x0001) { |
| 554 | CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n", | 557 | CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x, ntsm=%llx)\n", |
| 555 | sei->response.code); | 558 | sei->response.code, sei->ntsm); |
| 559 | |||
| 560 | if (sei->response.code == 3 && sei->ntsm) { | ||
| 561 | /* Fallback for old firmware. */ | ||
| 562 | ntsm_unsupported = 1; | ||
| 563 | continue; | ||
| 564 | } | ||
| 556 | break; | 565 | break; |
| 557 | } | 566 | } |
| 558 | 567 | ||
| @@ -568,7 +577,10 @@ static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm) | |||
| 568 | CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt); | 577 | CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt); |
| 569 | break; | 578 | break; |
| 570 | } | 579 | } |
| 571 | } while (sei->u.nt0_area.flags & 0x80); | 580 | |
| 581 | if (!(sei->u.nt0_area.flags & 0x80)) | ||
| 582 | break; | ||
| 583 | } | ||
| 572 | } | 584 | } |
| 573 | 585 | ||
| 574 | /* | 586 | /* |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 7f0af4fcc001..6fd7d40b2c4d 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c | |||
| @@ -8293,7 +8293,6 @@ _scsih_suspend(struct pci_dev *pdev, pm_message_t state) | |||
| 8293 | 8293 | ||
| 8294 | mpt2sas_base_free_resources(ioc); | 8294 | mpt2sas_base_free_resources(ioc); |
| 8295 | pci_save_state(pdev); | 8295 | pci_save_state(pdev); |
| 8296 | pci_disable_device(pdev); | ||
| 8297 | pci_set_power_state(pdev, device_state); | 8296 | pci_set_power_state(pdev, device_state); |
| 8298 | return 0; | 8297 | return 0; |
| 8299 | } | 8298 | } |
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 16bfd50cd3fe..db3b494e5926 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c | |||
| @@ -750,8 +750,12 @@ static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity) | |||
| 750 | 750 | ||
| 751 | vscsi->affinity_hint_set = true; | 751 | vscsi->affinity_hint_set = true; |
| 752 | } else { | 752 | } else { |
| 753 | for (i = 0; i < vscsi->num_queues; i++) | 753 | for (i = 0; i < vscsi->num_queues; i++) { |
| 754 | if (!vscsi->req_vqs[i].vq) | ||
| 755 | continue; | ||
| 756 | |||
| 754 | virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1); | 757 | virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1); |
| 758 | } | ||
| 755 | 759 | ||
| 756 | vscsi->affinity_hint_set = false; | 760 | vscsi->affinity_hint_set = false; |
| 757 | } | 761 | } |
diff --git a/fs/aio.c b/fs/aio.c --- a/fs/aio.c +++ b/fs/aio.c | |||
| @@ -112,6 +112,11 @@ struct kioctx { | |||
| 112 | 112 | ||
| 113 | struct work_struct free_work; | 113 | struct work_struct free_work; |
| 114 | 114 | ||
| 115 | /* | ||
| 116 | * signals when all in-flight requests are done | ||
| 117 | */ | ||
| 118 | struct completion *requests_done; | ||
| 119 | |||
| 115 | struct { | 120 | struct { |
| 116 | /* | 121 | /* |
| 117 | * This counts the number of available slots in the ringbuffer, | 122 | * This counts the number of available slots in the ringbuffer, |
| @@ -508,6 +513,10 @@ static void free_ioctx_reqs(struct percpu_ref *ref) | |||
| 508 | { | 513 | { |
| 509 | struct kioctx *ctx = container_of(ref, struct kioctx, reqs); | 514 | struct kioctx *ctx = container_of(ref, struct kioctx, reqs); |
| 510 | 515 | ||
| 516 | /* At this point we know that there are no in-flight requests */ | ||
| 517 | if (ctx->requests_done) | ||
| 518 | complete(ctx->requests_done); | ||
| 519 | |||
| 511 | INIT_WORK(&ctx->free_work, free_ioctx); | 520 | INIT_WORK(&ctx->free_work, free_ioctx); |
| 512 | schedule_work(&ctx->free_work); | 521 | schedule_work(&ctx->free_work); |
| 513 | } | 522 | } |
| @@ -718,7 +727,8 @@ err: | |||
| 718 | * when the processes owning a context have all exited to encourage | 727 | * when the processes owning a context have all exited to encourage |
| 719 | * the rapid destruction of the kioctx. | 728 | * the rapid destruction of the kioctx. |
| 720 | */ | 729 | */ |
| 721 | static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx) | 730 | static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx, |
| 731 | struct completion *requests_done) | ||
| 722 | { | 732 | { |
| 723 | if (!atomic_xchg(&ctx->dead, 1)) { | 733 | if (!atomic_xchg(&ctx->dead, 1)) { |
| 724 | struct kioctx_table *table; | 734 | struct kioctx_table *table; |
| @@ -747,7 +757,11 @@ static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx) | |||
| 747 | if (ctx->mmap_size) | 757 | if (ctx->mmap_size) |
| 748 | vm_munmap(ctx->mmap_base, ctx->mmap_size); | 758 | vm_munmap(ctx->mmap_base, ctx->mmap_size); |
| 749 | 759 | ||
| 760 | ctx->requests_done = requests_done; | ||
| 750 | percpu_ref_kill(&ctx->users); | 761 | percpu_ref_kill(&ctx->users); |
| 762 | } else { | ||
| 763 | if (requests_done) | ||
| 764 | complete(requests_done); | ||
| 751 | } | 765 | } |
| 752 | } | 766 | } |
| 753 | 767 | ||
| @@ -809,7 +823,7 @@ void exit_aio(struct mm_struct *mm) | |||
| 809 | */ | 823 | */ |
| 810 | ctx->mmap_size = 0; | 824 | ctx->mmap_size = 0; |
| 811 | 825 | ||
| 812 | kill_ioctx(mm, ctx); | 826 | kill_ioctx(mm, ctx, NULL); |
| 813 | } | 827 | } |
| 814 | } | 828 | } |
| 815 | 829 | ||
| @@ -1185,7 +1199,7 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp) | |||
| 1185 | if (!IS_ERR(ioctx)) { | 1199 | if (!IS_ERR(ioctx)) { |
| 1186 | ret = put_user(ioctx->user_id, ctxp); | 1200 | ret = put_user(ioctx->user_id, ctxp); |
| 1187 | if (ret) | 1201 | if (ret) |
| 1188 | kill_ioctx(current->mm, ioctx); | 1202 | kill_ioctx(current->mm, ioctx, NULL); |
| 1189 | percpu_ref_put(&ioctx->users); | 1203 | percpu_ref_put(&ioctx->users); |
| 1190 | } | 1204 | } |
| 1191 | 1205 | ||
| @@ -1203,8 +1217,22 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx) | |||
| 1203 | { | 1217 | { |
| 1204 | struct kioctx *ioctx = lookup_ioctx(ctx); | 1218 | struct kioctx *ioctx = lookup_ioctx(ctx); |
| 1205 | if (likely(NULL != ioctx)) { | 1219 | if (likely(NULL != ioctx)) { |
| 1206 | kill_ioctx(current->mm, ioctx); | 1220 | struct completion requests_done = |
| 1221 | COMPLETION_INITIALIZER_ONSTACK(requests_done); | ||
| 1222 | |||
| 1223 | /* Pass requests_done to kill_ioctx() where it can be set | ||
| 1224 | * in a thread-safe way. If we try to set it here then we have | ||
| 1225 | * a race condition if two io_destroy() calls run simultaneously. | ||
| 1226 | */ | ||
| 1227 | kill_ioctx(current->mm, ioctx, &requests_done); | ||
| 1207 | percpu_ref_put(&ioctx->users); | 1228 | percpu_ref_put(&ioctx->users); |
| 1229 | |||
| 1230 | /* Wait until all IO for the context is done. Otherwise the kernel | ||
| 1231 | * keeps using user-space buffers even if the user thinks the context | ||
| 1232 | * is destroyed. | ||
| 1233 | */ | ||
| 1234 | wait_for_completion(&requests_done); | ||
| 1235 | |||
| 1208 | return 0; | 1236 | return 0; |
| 1209 | } | 1237 | } |
| 1210 | pr_debug("EINVAL: io_destroy: invalid context id\n"); | 1238 | pr_debug("EINVAL: io_destroy: invalid context id\n"); |
| @@ -1299,10 +1327,8 @@ rw_common: | |||
| 1299 | &iovec, compat) | 1327 | &iovec, compat) |
| 1300 | : aio_setup_single_vector(req, rw, buf, &nr_segs, | 1328 | : aio_setup_single_vector(req, rw, buf, &nr_segs, |
| 1301 | iovec); | 1329 | iovec); |
| 1302 | if (ret) | 1330 | if (!ret) |
| 1303 | return ret; | 1331 | ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes); |
| 1304 | |||
| 1305 | ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes); | ||
| 1306 | if (ret < 0) { | 1332 | if (ret < 0) { |
| 1307 | if (iovec != &inline_vec) | 1333 | if (iovec != &inline_vec) |
| 1308 | kfree(iovec); | 1334 | kfree(iovec); |
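The aio changes make io_destroy() synchronous with respect to in-flight requests: the syscall builds a completion on its own stack, kill_ioctx() stores a pointer to it under the same atomic_xchg() that serialises teardown, and free_ioctx_reqs() fires it once the request refcount drains. Stripped to its shape, with the context type and helper declared only for illustration:

#include <linux/completion.h>

struct demo_ctx;					/* illustrative context type */
void demo_kill_ctx(struct demo_ctx *ctx, struct completion *done);

/* The waiter owns the completion on its stack, the teardown path publishes
 * a pointer to it, and the callback that runs when the last in-flight
 * request drains calls complete() on it. */
static int demo_destroy_and_wait(struct demo_ctx *ctx)
{
	struct completion requests_done =
		COMPLETION_INITIALIZER_ONSTACK(requests_done);

	demo_kill_ctx(ctx, &requests_done);	/* publishes &requests_done */
	wait_for_completion(&requests_done);	/* user buffers stay in use until here */
	return 0;
}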
diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h index d96deb443f18..94f9ea8abcae 100644 --- a/include/asm-generic/word-at-a-time.h +++ b/include/asm-generic/word-at-a-time.h | |||
| @@ -50,7 +50,7 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct | |||
| 50 | } | 50 | } |
| 51 | 51 | ||
| 52 | #ifndef zero_bytemask | 52 | #ifndef zero_bytemask |
| 53 | #define zero_bytemask(mask) (~0ul << __fls(mask) << 1) | 53 | #define zero_bytemask(mask) (~1ul << __fls(mask)) |
| 54 | #endif | 54 | #endif |
| 55 | 55 | ||
| 56 | #endif /* _ASM_WORD_AT_A_TIME_H */ | 56 | #endif /* _ASM_WORD_AT_A_TIME_H */ |
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 9212b017bc72..ae9504b4b67d 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h | |||
| @@ -535,6 +535,7 @@ static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_a | |||
| 535 | extern int ftrace_arch_read_dyn_info(char *buf, int size); | 535 | extern int ftrace_arch_read_dyn_info(char *buf, int size); |
| 536 | 536 | ||
| 537 | extern int skip_trace(unsigned long ip); | 537 | extern int skip_trace(unsigned long ip); |
| 538 | extern void ftrace_module_init(struct module *mod); | ||
| 538 | 539 | ||
| 539 | extern void ftrace_disable_daemon(void); | 540 | extern void ftrace_disable_daemon(void); |
| 540 | extern void ftrace_enable_daemon(void); | 541 | extern void ftrace_enable_daemon(void); |
| @@ -544,6 +545,7 @@ static inline int ftrace_force_update(void) { return 0; } | |||
| 544 | static inline void ftrace_disable_daemon(void) { } | 545 | static inline void ftrace_disable_daemon(void) { } |
| 545 | static inline void ftrace_enable_daemon(void) { } | 546 | static inline void ftrace_enable_daemon(void) { } |
| 546 | static inline void ftrace_release_mod(struct module *mod) {} | 547 | static inline void ftrace_release_mod(struct module *mod) {} |
| 548 | static inline void ftrace_module_init(struct module *mod) {} | ||
| 547 | static inline __init int register_ftrace_command(struct ftrace_func_command *cmd) | 549 | static inline __init int register_ftrace_command(struct ftrace_func_command *cmd) |
| 548 | { | 550 | { |
| 549 | return -EINVAL; | 551 | return -EINVAL; |
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 8834a7e5b944..97ac926c78a7 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h | |||
| @@ -210,7 +210,7 @@ extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask, | |||
| 210 | /** | 210 | /** |
| 211 | * irq_set_affinity - Set the irq affinity of a given irq | 211 | * irq_set_affinity - Set the irq affinity of a given irq |
| 212 | * @irq: Interrupt to set affinity | 212 | * @irq: Interrupt to set affinity |
| 213 | * @mask: cpumask | 213 | * @cpumask: cpumask |
| 214 | * | 214 | * |
| 215 | * Fails if cpumask does not contain an online CPU | 215 | * Fails if cpumask does not contain an online CPU |
| 216 | */ | 216 | */ |
| @@ -223,7 +223,7 @@ irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) | |||
| 223 | /** | 223 | /** |
| 224 | * irq_force_affinity - Force the irq affinity of a given irq | 224 | * irq_force_affinity - Force the irq affinity of a given irq |
| 225 | * @irq: Interrupt to set affinity | 225 | * @irq: Interrupt to set affinity |
| 226 | * @mask: cpumask | 226 | * @cpumask: cpumask |
| 227 | * | 227 | * |
| 228 | * Same as irq_set_affinity, but without checking the mask against | 228 | * Same as irq_set_affinity, but without checking the mask against |
| 229 | * online cpus. | 229 | * online cpus. |
diff --git a/include/linux/irq.h b/include/linux/irq.h index 10a0b1ac4ea0..5c57efb863d0 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
| @@ -603,6 +603,8 @@ static inline u32 irq_get_trigger_type(unsigned int irq) | |||
| 603 | return d ? irqd_get_trigger_type(d) : 0; | 603 | return d ? irqd_get_trigger_type(d) : 0; |
| 604 | } | 604 | } |
| 605 | 605 | ||
| 606 | unsigned int arch_dynirq_lower_bound(unsigned int from); | ||
| 607 | |||
| 606 | int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, | 608 | int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, |
| 607 | struct module *owner); | 609 | struct module *owner); |
| 608 | 610 | ||
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h index 3f23b4472c31..6404253d810d 100644 --- a/include/linux/of_irq.h +++ b/include/linux/of_irq.h | |||
| @@ -44,11 +44,16 @@ extern void of_irq_init(const struct of_device_id *matches); | |||
| 44 | 44 | ||
| 45 | #ifdef CONFIG_OF_IRQ | 45 | #ifdef CONFIG_OF_IRQ |
| 46 | extern int of_irq_count(struct device_node *dev); | 46 | extern int of_irq_count(struct device_node *dev); |
| 47 | extern int of_irq_get(struct device_node *dev, int index); | ||
| 47 | #else | 48 | #else |
| 48 | static inline int of_irq_count(struct device_node *dev) | 49 | static inline int of_irq_count(struct device_node *dev) |
| 49 | { | 50 | { |
| 50 | return 0; | 51 | return 0; |
| 51 | } | 52 | } |
| 53 | static inline int of_irq_get(struct device_node *dev, int index) | ||
| 54 | { | ||
| 55 | return 0; | ||
| 56 | } | ||
| 52 | #endif | 57 | #endif |
| 53 | 58 | ||
| 54 | #if defined(CONFIG_OF) | 59 | #if defined(CONFIG_OF) |
diff --git a/include/trace/events/module.h b/include/trace/events/module.h index 11fd51b413de..ed0b2c599a64 100644 --- a/include/trace/events/module.h +++ b/include/trace/events/module.h | |||
| @@ -25,7 +25,7 @@ struct module; | |||
| 25 | { (1UL << TAINT_OOT_MODULE), "O" }, \ | 25 | { (1UL << TAINT_OOT_MODULE), "O" }, \ |
| 26 | { (1UL << TAINT_FORCED_MODULE), "F" }, \ | 26 | { (1UL << TAINT_FORCED_MODULE), "F" }, \ |
| 27 | { (1UL << TAINT_CRAP), "C" }, \ | 27 | { (1UL << TAINT_CRAP), "C" }, \ |
| 28 | { (1UL << TAINT_UNSIGNED_MODULE), "X" }) | 28 | { (1UL << TAINT_UNSIGNED_MODULE), "E" }) |
| 29 | 29 | ||
| 30 | TRACE_EVENT(module_load, | 30 | TRACE_EVENT(module_load, |
| 31 | 31 | ||
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index d55092ceee29..6b715c0af1b1 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c | |||
| @@ -234,6 +234,11 @@ again: | |||
| 234 | goto again; | 234 | goto again; |
| 235 | } | 235 | } |
| 236 | timer->base = new_base; | 236 | timer->base = new_base; |
| 237 | } else { | ||
| 238 | if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) { | ||
| 239 | cpu = this_cpu; | ||
| 240 | goto again; | ||
| 241 | } | ||
| 237 | } | 242 | } |
| 238 | return new_base; | 243 | return new_base; |
| 239 | } | 244 | } |
| @@ -569,6 +574,23 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal) | |||
| 569 | 574 | ||
| 570 | cpu_base->expires_next.tv64 = expires_next.tv64; | 575 | cpu_base->expires_next.tv64 = expires_next.tv64; |
| 571 | 576 | ||
| 577 | /* | ||
| 578 | * If a hang was detected in the last timer interrupt then we | ||
| 579 | * leave the hang delay active in the hardware. We want the | ||
| 580 | * system to make progress. That also prevents the following | ||
| 581 | * scenario: | ||
| 582 | * T1 expires 50ms from now | ||
| 583 | * T2 expires 5s from now | ||
| 584 | * | ||
| 585 | * T1 is removed, so this code is called and would reprogram | ||
| 586 | * the hardware to 5s from now. Any hrtimer_start after that | ||
| 587 | * will not reprogram the hardware due to hang_detected being | ||
| 588 | * set. So we'd effectively block all timers until the T2 event | ||
| 589 | * fires. | ||
| 590 | */ | ||
| 591 | if (cpu_base->hang_detected) | ||
| 592 | return; | ||
| 593 | |||
| 572 | if (cpu_base->expires_next.tv64 != KTIME_MAX) | 594 | if (cpu_base->expires_next.tv64 != KTIME_MAX) |
| 573 | tick_program_event(cpu_base->expires_next, 1); | 595 | tick_program_event(cpu_base->expires_next, 1); |
| 574 | } | 596 | } |
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index a7174617616b..bb07f2928f4b 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c | |||
| @@ -363,6 +363,13 @@ __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, | |||
| 363 | if (from > irq) | 363 | if (from > irq) |
| 364 | return -EINVAL; | 364 | return -EINVAL; |
| 365 | from = irq; | 365 | from = irq; |
| 366 | } else { | ||
| 367 | /* | ||
| 368 | * For interrupts which are freely allocated the | ||
| 369 | * architecture can force a lower bound to the @from | ||
| 370 | * argument. x86 uses this to exclude the GSI space. | ||
| 371 | */ | ||
| 372 | from = arch_dynirq_lower_bound(from); | ||
| 366 | } | 373 | } |
| 367 | 374 | ||
| 368 | mutex_lock(&sparse_irq_lock); | 375 | mutex_lock(&sparse_irq_lock); |
diff --git a/kernel/module.c b/kernel/module.c index 11869408f79b..079c4615607d 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
| @@ -815,9 +815,6 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user, | |||
| 815 | return -EFAULT; | 815 | return -EFAULT; |
| 816 | name[MODULE_NAME_LEN-1] = '\0'; | 816 | name[MODULE_NAME_LEN-1] = '\0'; |
| 817 | 817 | ||
| 818 | if (!(flags & O_NONBLOCK)) | ||
| 819 | pr_warn("waiting module removal not supported: please upgrade\n"); | ||
| 820 | |||
| 821 | if (mutex_lock_interruptible(&module_mutex) != 0) | 818 | if (mutex_lock_interruptible(&module_mutex) != 0) |
| 822 | return -EINTR; | 819 | return -EINTR; |
| 823 | 820 | ||
| @@ -3271,6 +3268,9 @@ static int load_module(struct load_info *info, const char __user *uargs, | |||
| 3271 | 3268 | ||
| 3272 | dynamic_debug_setup(info->debug, info->num_debug); | 3269 | dynamic_debug_setup(info->debug, info->num_debug); |
| 3273 | 3270 | ||
| 3271 | /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */ | ||
| 3272 | ftrace_module_init(mod); | ||
| 3273 | |||
| 3274 | /* Finally it's fully formed, ready to start executing. */ | 3274 | /* Finally it's fully formed, ready to start executing. */ |
| 3275 | err = complete_formation(mod, info); | 3275 | err = complete_formation(mod, info); |
| 3276 | if (err) | 3276 | if (err) |
diff --git a/kernel/softirq.c b/kernel/softirq.c index b50990a5bea0..33e4648ae0e7 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
| @@ -779,3 +779,8 @@ int __init __weak arch_early_irq_init(void) | |||
| 779 | { | 779 | { |
| 780 | return 0; | 780 | return 0; |
| 781 | } | 781 | } |
| 782 | |||
| 783 | unsigned int __weak arch_dynirq_lower_bound(unsigned int from) | ||
| 784 | { | ||
| 785 | return from; | ||
| 786 | } | ||
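arch_dynirq_lower_bound() follows the usual weak-symbol pattern: kernel/softirq.c supplies a __weak default that leaves 'from' untouched, and an architecture that needs to fence off a range (x86 excludes its GSI space) provides a strong definition that the linker prefers. Schematically, as below; the override body is a placeholder, not the real x86 code:

/* kernel/softirq.c: weak default, used when no architecture override exists */
unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}

/* arch/<arch>/kernel/...: a strong definition wins at link time.
 * The body is illustrative only. */
unsigned int arch_dynirq_lower_bound(unsigned int from)
{
	unsigned int reserved_top = 64;		/* assumed reserved GSI range */

	return from < reserved_top ? reserved_top : from;
}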
diff --git a/kernel/timer.c b/kernel/timer.c index 87bd529879c2..3bb01a323b2a 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
| @@ -838,7 +838,7 @@ unsigned long apply_slack(struct timer_list *timer, unsigned long expires) | |||
| 838 | 838 | ||
| 839 | bit = find_last_bit(&mask, BITS_PER_LONG); | 839 | bit = find_last_bit(&mask, BITS_PER_LONG); |
| 840 | 840 | ||
| 841 | mask = (1 << bit) - 1; | 841 | mask = (1UL << bit) - 1; |
| 842 | 842 | ||
| 843 | expires_limit = expires_limit & ~(mask); | 843 | expires_limit = expires_limit & ~(mask); |
| 844 | 844 | ||
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 1fd4b9479210..4a54a25afa2f 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
| @@ -4330,16 +4330,11 @@ static void ftrace_init_module(struct module *mod, | |||
| 4330 | ftrace_process_locs(mod, start, end); | 4330 | ftrace_process_locs(mod, start, end); |
| 4331 | } | 4331 | } |
| 4332 | 4332 | ||
| 4333 | static int ftrace_module_notify_enter(struct notifier_block *self, | 4333 | void ftrace_module_init(struct module *mod) |
| 4334 | unsigned long val, void *data) | ||
| 4335 | { | 4334 | { |
| 4336 | struct module *mod = data; | 4335 | ftrace_init_module(mod, mod->ftrace_callsites, |
| 4337 | 4336 | mod->ftrace_callsites + | |
| 4338 | if (val == MODULE_STATE_COMING) | 4337 | mod->num_ftrace_callsites); |
| 4339 | ftrace_init_module(mod, mod->ftrace_callsites, | ||
| 4340 | mod->ftrace_callsites + | ||
| 4341 | mod->num_ftrace_callsites); | ||
| 4342 | return 0; | ||
| 4343 | } | 4338 | } |
| 4344 | 4339 | ||
| 4345 | static int ftrace_module_notify_exit(struct notifier_block *self, | 4340 | static int ftrace_module_notify_exit(struct notifier_block *self, |
| @@ -4353,11 +4348,6 @@ static int ftrace_module_notify_exit(struct notifier_block *self, | |||
| 4353 | return 0; | 4348 | return 0; |
| 4354 | } | 4349 | } |
| 4355 | #else | 4350 | #else |
| 4356 | static int ftrace_module_notify_enter(struct notifier_block *self, | ||
| 4357 | unsigned long val, void *data) | ||
| 4358 | { | ||
| 4359 | return 0; | ||
| 4360 | } | ||
| 4361 | static int ftrace_module_notify_exit(struct notifier_block *self, | 4351 | static int ftrace_module_notify_exit(struct notifier_block *self, |
| 4362 | unsigned long val, void *data) | 4352 | unsigned long val, void *data) |
| 4363 | { | 4353 | { |
| @@ -4365,11 +4355,6 @@ static int ftrace_module_notify_exit(struct notifier_block *self, | |||
| 4365 | } | 4355 | } |
| 4366 | #endif /* CONFIG_MODULES */ | 4356 | #endif /* CONFIG_MODULES */ |
| 4367 | 4357 | ||
| 4368 | struct notifier_block ftrace_module_enter_nb = { | ||
| 4369 | .notifier_call = ftrace_module_notify_enter, | ||
| 4370 | .priority = INT_MAX, /* Run before anything that can use kprobes */ | ||
| 4371 | }; | ||
| 4372 | |||
| 4373 | struct notifier_block ftrace_module_exit_nb = { | 4358 | struct notifier_block ftrace_module_exit_nb = { |
| 4374 | .notifier_call = ftrace_module_notify_exit, | 4359 | .notifier_call = ftrace_module_notify_exit, |
| 4375 | .priority = INT_MIN, /* Run after anything that can remove kprobes */ | 4360 | .priority = INT_MIN, /* Run after anything that can remove kprobes */ |
| @@ -4403,10 +4388,6 @@ void __init ftrace_init(void) | |||
| 4403 | __start_mcount_loc, | 4388 | __start_mcount_loc, |
| 4404 | __stop_mcount_loc); | 4389 | __stop_mcount_loc); |
| 4405 | 4390 | ||
| 4406 | ret = register_module_notifier(&ftrace_module_enter_nb); | ||
| 4407 | if (ret) | ||
| 4408 | pr_warning("Failed to register trace ftrace module enter notifier\n"); | ||
| 4409 | |||
| 4410 | ret = register_module_notifier(&ftrace_module_exit_nb); | 4391 | ret = register_module_notifier(&ftrace_module_exit_nb); |
| 4411 | if (ret) | 4392 | if (ret) |
| 4412 | pr_warning("Failed to register trace ftrace module exit notifier\n"); | 4393 | pr_warning("Failed to register trace ftrace module exit notifier\n"); |
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index 925f537f07d1..4747b476a030 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c | |||
| @@ -77,7 +77,7 @@ event_triggers_call(struct ftrace_event_file *file, void *rec) | |||
| 77 | data->ops->func(data); | 77 | data->ops->func(data); |
| 78 | continue; | 78 | continue; |
| 79 | } | 79 | } |
| 80 | filter = rcu_dereference(data->filter); | 80 | filter = rcu_dereference_sched(data->filter); |
| 81 | if (filter && !filter_match_preds(filter, rec)) | 81 | if (filter && !filter_match_preds(filter, rec)) |
| 82 | continue; | 82 | continue; |
| 83 | if (data->cmd_ops->post_trigger) { | 83 | if (data->cmd_ops->post_trigger) { |
diff --git a/mm/vmacache.c b/mm/vmacache.c index d4224b397c0e..1037a3bab505 100644 --- a/mm/vmacache.c +++ b/mm/vmacache.c | |||
| @@ -81,10 +81,12 @@ struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr) | |||
| 81 | for (i = 0; i < VMACACHE_SIZE; i++) { | 81 | for (i = 0; i < VMACACHE_SIZE; i++) { |
| 82 | struct vm_area_struct *vma = current->vmacache[i]; | 82 | struct vm_area_struct *vma = current->vmacache[i]; |
| 83 | 83 | ||
| 84 | if (vma && vma->vm_start <= addr && vma->vm_end > addr) { | 84 | if (!vma) |
| 85 | BUG_ON(vma->vm_mm != mm); | 85 | continue; |
| 86 | if (WARN_ON_ONCE(vma->vm_mm != mm)) | ||
| 87 | break; | ||
| 88 | if (vma->vm_start <= addr && vma->vm_end > addr) | ||
| 86 | return vma; | 89 | return vma; |
| 87 | } | ||
| 88 | } | 90 | } |
| 89 | 91 | ||
| 90 | return NULL; | 92 | return NULL; |
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c index 248b90abb882..480bbddbd801 100644 --- a/sound/pci/hda/hda_controller.c +++ b/sound/pci/hda/hda_controller.c | |||
| @@ -1059,24 +1059,26 @@ static void azx_init_cmd_io(struct azx *chip) | |||
| 1059 | 1059 | ||
| 1060 | /* reset the corb hw read pointer */ | 1060 | /* reset the corb hw read pointer */ |
| 1061 | azx_writew(chip, CORBRP, ICH6_CORBRP_RST); | 1061 | azx_writew(chip, CORBRP, ICH6_CORBRP_RST); |
| 1062 | for (timeout = 1000; timeout > 0; timeout--) { | 1062 | if (!(chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)) { |
| 1063 | if ((azx_readw(chip, CORBRP) & ICH6_CORBRP_RST) == ICH6_CORBRP_RST) | 1063 | for (timeout = 1000; timeout > 0; timeout--) { |
| 1064 | break; | 1064 | if ((azx_readw(chip, CORBRP) & ICH6_CORBRP_RST) == ICH6_CORBRP_RST) |
| 1065 | udelay(1); | 1065 | break; |
| 1066 | } | 1066 | udelay(1); |
| 1067 | if (timeout <= 0) | 1067 | } |
| 1068 | dev_err(chip->card->dev, "CORB reset timeout#1, CORBRP = %d\n", | 1068 | if (timeout <= 0) |
| 1069 | azx_readw(chip, CORBRP)); | 1069 | dev_err(chip->card->dev, "CORB reset timeout#1, CORBRP = %d\n", |
| 1070 | azx_readw(chip, CORBRP)); | ||
| 1070 | 1071 | ||
| 1071 | azx_writew(chip, CORBRP, 0); | 1072 | azx_writew(chip, CORBRP, 0); |
| 1072 | for (timeout = 1000; timeout > 0; timeout--) { | 1073 | for (timeout = 1000; timeout > 0; timeout--) { |
| 1073 | if (azx_readw(chip, CORBRP) == 0) | 1074 | if (azx_readw(chip, CORBRP) == 0) |
| 1074 | break; | 1075 | break; |
| 1075 | udelay(1); | 1076 | udelay(1); |
| 1077 | } | ||
| 1078 | if (timeout <= 0) | ||
| 1079 | dev_err(chip->card->dev, "CORB reset timeout#2, CORBRP = %d\n", | ||
| 1080 | azx_readw(chip, CORBRP)); | ||
| 1076 | } | 1081 | } |
| 1077 | if (timeout <= 0) | ||
| 1078 | dev_err(chip->card->dev, "CORB reset timeout#2, CORBRP = %d\n", | ||
| 1079 | azx_readw(chip, CORBRP)); | ||
| 1080 | 1082 | ||
| 1081 | /* enable corb dma */ | 1083 | /* enable corb dma */ |
| 1082 | azx_writeb(chip, CORBCTL, ICH6_CORBCTL_RUN); | 1084 | azx_writeb(chip, CORBCTL, ICH6_CORBCTL_RUN); |
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index d6bca62ef387..b540ad71eb0d 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
| @@ -249,7 +249,8 @@ enum { | |||
| 249 | /* quirks for Nvidia */ | 249 | /* quirks for Nvidia */ |
| 250 | #define AZX_DCAPS_PRESET_NVIDIA \ | 250 | #define AZX_DCAPS_PRESET_NVIDIA \ |
| 251 | (AZX_DCAPS_NVIDIA_SNOOP | AZX_DCAPS_RIRB_DELAY | AZX_DCAPS_NO_MSI |\ | 251 | (AZX_DCAPS_NVIDIA_SNOOP | AZX_DCAPS_RIRB_DELAY | AZX_DCAPS_NO_MSI |\ |
| 252 | AZX_DCAPS_ALIGN_BUFSIZE | AZX_DCAPS_NO_64BIT) | 252 | AZX_DCAPS_ALIGN_BUFSIZE | AZX_DCAPS_NO_64BIT |\ |
| 253 | AZX_DCAPS_CORBRP_SELF_CLEAR) | ||
| 253 | 254 | ||
| 254 | #define AZX_DCAPS_PRESET_CTHDA \ | 255 | #define AZX_DCAPS_PRESET_CTHDA \ |
| 255 | (AZX_DCAPS_NO_MSI | AZX_DCAPS_POSFIX_LPIB | AZX_DCAPS_4K_BDLE_BOUNDARY) | 256 | (AZX_DCAPS_NO_MSI | AZX_DCAPS_POSFIX_LPIB | AZX_DCAPS_4K_BDLE_BOUNDARY) |
diff --git a/sound/pci/hda/hda_priv.h b/sound/pci/hda/hda_priv.h index ba38b819f984..4a7cb01fa912 100644 --- a/sound/pci/hda/hda_priv.h +++ b/sound/pci/hda/hda_priv.h | |||
| @@ -189,6 +189,7 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 }; | |||
| 189 | #define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */ | 189 | #define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */ |
| 190 | #define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */ | 190 | #define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */ |
| 191 | #define AZX_DCAPS_I915_POWERWELL (1 << 27) /* HSW i915 powerwell support */ | 191 | #define AZX_DCAPS_I915_POWERWELL (1 << 27) /* HSW i915 powerwell support */ |
| 192 | #define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28) /* CORBRP clears itself after reset */ | ||
| 192 | 193 | ||
| 193 | /* position fix mode */ | 194 | /* position fix mode */ |
| 194 | enum { | 195 | enum { |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index c643dfc0a826..c1952c910339 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
| @@ -4621,6 +4621,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
| 4621 | SND_PCI_QUIRK(0x1028, 0x0667, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), | 4621 | SND_PCI_QUIRK(0x1028, 0x0667, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), |
| 4622 | SND_PCI_QUIRK(0x1028, 0x0668, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE), | 4622 | SND_PCI_QUIRK(0x1028, 0x0668, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE), |
| 4623 | SND_PCI_QUIRK(0x1028, 0x0669, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE), | 4623 | SND_PCI_QUIRK(0x1028, 0x0669, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE), |
| 4624 | SND_PCI_QUIRK(0x1028, 0x0674, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), | ||
| 4624 | SND_PCI_QUIRK(0x1028, 0x067f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), | 4625 | SND_PCI_QUIRK(0x1028, 0x067f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), |
| 4625 | SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), | 4626 | SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), |
| 4626 | SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), | 4627 | SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), |
diff --git a/sound/soc/codecs/alc5623.c b/sound/soc/codecs/alc5623.c index f500905e9373..2acf82f4a08a 100644 --- a/sound/soc/codecs/alc5623.c +++ b/sound/soc/codecs/alc5623.c | |||
| @@ -1018,13 +1018,13 @@ static int alc5623_i2c_probe(struct i2c_client *client, | |||
| 1018 | dev_err(&client->dev, "failed to read vendor ID1: %d\n", ret); | 1018 | dev_err(&client->dev, "failed to read vendor ID1: %d\n", ret); |
| 1019 | return ret; | 1019 | return ret; |
| 1020 | } | 1020 | } |
| 1021 | vid1 = ((vid1 & 0xff) << 8) | (vid1 >> 8); | ||
| 1022 | 1021 | ||
| 1023 | ret = regmap_read(alc5623->regmap, ALC5623_VENDOR_ID2, &vid2); | 1022 | ret = regmap_read(alc5623->regmap, ALC5623_VENDOR_ID2, &vid2); |
| 1024 | if (ret < 0) { | 1023 | if (ret < 0) { |
| 1025 | dev_err(&client->dev, "failed to read vendor ID2: %d\n", ret); | 1024 | dev_err(&client->dev, "failed to read vendor ID2: %d\n", ret); |
| 1026 | return ret; | 1025 | return ret; |
| 1027 | } | 1026 | } |
| 1027 | vid2 >>= 8; | ||
| 1028 | 1028 | ||
| 1029 | if ((vid1 != 0x10ec) || (vid2 != id->driver_data)) { | 1029 | if ((vid1 != 0x10ec) || (vid2 != id->driver_data)) { |
| 1030 | dev_err(&client->dev, "unknown or wrong codec\n"); | 1030 | dev_err(&client->dev, "unknown or wrong codec\n"); |
diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c index 460d35547a68..2213a037c893 100644 --- a/sound/soc/codecs/cs42l52.c +++ b/sound/soc/codecs/cs42l52.c | |||
| @@ -1229,8 +1229,10 @@ static int cs42l52_i2c_probe(struct i2c_client *i2c_client, | |||
| 1229 | } | 1229 | } |
| 1230 | 1230 | ||
| 1231 | if (cs42l52->pdata.reset_gpio) { | 1231 | if (cs42l52->pdata.reset_gpio) { |
| 1232 | ret = gpio_request_one(cs42l52->pdata.reset_gpio, | 1232 | ret = devm_gpio_request_one(&i2c_client->dev, |
| 1233 | GPIOF_OUT_INIT_HIGH, "CS42L52 /RST"); | 1233 | cs42l52->pdata.reset_gpio, |
| 1234 | GPIOF_OUT_INIT_HIGH, | ||
| 1235 | "CS42L52 /RST"); | ||
| 1234 | if (ret < 0) { | 1236 | if (ret < 0) { |
| 1235 | dev_err(&i2c_client->dev, "Failed to request /RST %d: %d\n", | 1237 | dev_err(&i2c_client->dev, "Failed to request /RST %d: %d\n", |
| 1236 | cs42l52->pdata.reset_gpio, ret); | 1238 | cs42l52->pdata.reset_gpio, ret); |
diff --git a/sound/soc/codecs/cs42l73.c b/sound/soc/codecs/cs42l73.c index 0ee60a19a263..ae3717992d56 100644 --- a/sound/soc/codecs/cs42l73.c +++ b/sound/soc/codecs/cs42l73.c | |||
| @@ -1443,8 +1443,10 @@ static int cs42l73_i2c_probe(struct i2c_client *i2c_client, | |||
| 1443 | i2c_set_clientdata(i2c_client, cs42l73); | 1443 | i2c_set_clientdata(i2c_client, cs42l73); |
| 1444 | 1444 | ||
| 1445 | if (cs42l73->pdata.reset_gpio) { | 1445 | if (cs42l73->pdata.reset_gpio) { |
| 1446 | ret = gpio_request_one(cs42l73->pdata.reset_gpio, | 1446 | ret = devm_gpio_request_one(&i2c_client->dev, |
| 1447 | GPIOF_OUT_INIT_HIGH, "CS42L73 /RST"); | 1447 | cs42l73->pdata.reset_gpio, |
| 1448 | GPIOF_OUT_INIT_HIGH, | ||
| 1449 | "CS42L73 /RST"); | ||
| 1448 | if (ret < 0) { | 1450 | if (ret < 0) { |
| 1449 | dev_err(&i2c_client->dev, "Failed to request /RST %d: %d\n", | 1451 | dev_err(&i2c_client->dev, "Failed to request /RST %d: %d\n", |
| 1450 | cs42l73->pdata.reset_gpio, ret); | 1452 | cs42l73->pdata.reset_gpio, ret); |
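
Both Cirrus CODEC probes above move from gpio_request_one() to the device-managed devm_gpio_request_one(), so the reset GPIO is released automatically when a later probe step fails or the I2C device is unbound, and no explicit gpio_free() is needed. A minimal sketch of the pattern follows; the chip_priv structure and names are placeholders, not the actual driver fields.

/*
 * Sketch only: request a reset GPIO with devm so every error path and
 * the remove path are covered without an explicit gpio_free().
 */
#include <linux/gpio.h>
#include <linux/i2c.h>

struct chip_priv {
	int reset_gpio;			/* placeholder for pdata.reset_gpio */
};

static int chip_request_reset(struct i2c_client *i2c, struct chip_priv *chip)
{
	int ret;

	if (gpio_is_valid(chip->reset_gpio)) {
		ret = devm_gpio_request_one(&i2c->dev, chip->reset_gpio,
					    GPIOF_OUT_INIT_HIGH, "chip /RST");
		if (ret < 0)
			return ret;	/* nothing to unwind; devm handles it */
	}

	return 0;
}
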
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c index b1835103e9b4..d7349bc89ad3 100644 --- a/sound/soc/codecs/tlv320aic3x.c +++ b/sound/soc/codecs/tlv320aic3x.c | |||
| @@ -1399,7 +1399,6 @@ static int aic3x_probe(struct snd_soc_codec *codec) | |||
| 1399 | } | 1399 | } |
| 1400 | 1400 | ||
| 1401 | aic3x_add_widgets(codec); | 1401 | aic3x_add_widgets(codec); |
| 1402 | list_add(&aic3x->list, &reset_list); | ||
| 1403 | 1402 | ||
| 1404 | return 0; | 1403 | return 0; |
| 1405 | 1404 | ||
| @@ -1569,7 +1568,13 @@ static int aic3x_i2c_probe(struct i2c_client *i2c, | |||
| 1569 | 1568 | ||
| 1570 | ret = snd_soc_register_codec(&i2c->dev, | 1569 | ret = snd_soc_register_codec(&i2c->dev, |
| 1571 | &soc_codec_dev_aic3x, &aic3x_dai, 1); | 1570 | &soc_codec_dev_aic3x, &aic3x_dai, 1); |
| 1572 | return ret; | 1571 | |
| 1572 | if (ret != 0) | ||
| 1573 | goto err_gpio; | ||
| 1574 | |||
| 1575 | list_add(&aic3x->list, &reset_list); | ||
| 1576 | |||
| 1577 | return 0; | ||
| 1573 | 1578 | ||
| 1574 | err_gpio: | 1579 | err_gpio: |
| 1575 | if (gpio_is_valid(aic3x->gpio_reset) && | 1580 | if (gpio_is_valid(aic3x->gpio_reset) && |
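
The tlv320aic3x change above is an ordering fix: the codec is added to the global reset_list only after snd_soc_register_codec() succeeds, so a failed probe no longer leaves a dangling list entry behind. A generic sketch of that publish-after-success pattern (the names here are illustrative, not the driver's):

#include <linux/list.h>

struct item {
	struct list_head node;
};

static LIST_HEAD(registered_items);

static int item_register(struct item *it, int (*register_hw)(struct item *))
{
	int ret;

	ret = register_hw(it);		/* the step that may fail */
	if (ret)
		return ret;		/* not on the list yet, nothing to unlink */

	list_add(&it->node, &registered_items);	/* publish only on success */
	return 0;
}
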
diff --git a/sound/soc/fsl/fsl_spdif.h b/sound/soc/fsl/fsl_spdif.h index b1266790d117..605a10b2112b 100644 --- a/sound/soc/fsl/fsl_spdif.h +++ b/sound/soc/fsl/fsl_spdif.h | |||
| @@ -144,8 +144,8 @@ enum spdif_gainsel { | |||
| 144 | 144 | ||
| 145 | /* SPDIF Clock register */ | 145 | /* SPDIF Clock register */ |
| 146 | #define STC_SYSCLK_DIV_OFFSET 11 | 146 | #define STC_SYSCLK_DIV_OFFSET 11 |
| 147 | #define STC_SYSCLK_DIV_MASK (0x1ff << STC_TXCLK_SRC_OFFSET) | 147 | #define STC_SYSCLK_DIV_MASK (0x1ff << STC_SYSCLK_DIV_OFFSET) |
| 148 | #define STC_SYSCLK_DIV(x) ((((x) - 1) << STC_TXCLK_DIV_OFFSET) & STC_SYSCLK_DIV_MASK) | 148 | #define STC_SYSCLK_DIV(x) ((((x) - 1) << STC_SYSCLK_DIV_OFFSET) & STC_SYSCLK_DIV_MASK) |
| 149 | #define STC_TXCLK_SRC_OFFSET 8 | 149 | #define STC_TXCLK_SRC_OFFSET 8 |
| 150 | #define STC_TXCLK_SRC_MASK (0x7 << STC_TXCLK_SRC_OFFSET) | 150 | #define STC_TXCLK_SRC_MASK (0x7 << STC_TXCLK_SRC_OFFSET) |
| 151 | #define STC_TXCLK_SRC_SET(x) ((x << STC_TXCLK_SRC_OFFSET) & STC_TXCLK_SRC_MASK) | 151 | #define STC_TXCLK_SRC_SET(x) ((x << STC_TXCLK_SRC_OFFSET) & STC_TXCLK_SRC_MASK) |
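
The fsl_spdif.h hunk fixes two copy-paste mistakes: the divider mask was built from STC_TXCLK_SRC_OFFSET and the value macro from STC_TXCLK_DIV_OFFSET instead of STC_SYSCLK_DIV_OFFSET, so neither lined up with the divider field that actually starts at bit 11. A small userspace check of the corrected macros, assuming only what the fixed header states:

#include <stdio.h>

#define STC_SYSCLK_DIV_OFFSET	11
#define STC_SYSCLK_DIV_MASK	(0x1ff << STC_SYSCLK_DIV_OFFSET)
#define STC_SYSCLK_DIV(x)	((((x) - 1) << STC_SYSCLK_DIV_OFFSET) & STC_SYSCLK_DIV_MASK)

int main(void)
{
	/* mask now covers bits 11..19; a divider of 2 programs raw value 1 */
	printf("mask=0x%08x div(2)=0x%08x\n",
	       STC_SYSCLK_DIV_MASK, STC_SYSCLK_DIV(2));
	return 0;	/* prints mask=0x000ff800 div(2)=0x00000800 */
}
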
diff --git a/sound/soc/intel/sst-dsp-priv.h b/sound/soc/intel/sst-dsp-priv.h index fe8e81aad646..30ca14a6a835 100644 --- a/sound/soc/intel/sst-dsp-priv.h +++ b/sound/soc/intel/sst-dsp-priv.h | |||
| @@ -136,7 +136,7 @@ struct sst_module_data { | |||
| 136 | enum sst_data_type data_type; /* type of module data */ | 136 | enum sst_data_type data_type; /* type of module data */ |
| 137 | 137 | ||
| 138 | u32 size; /* size in bytes */ | 138 | u32 size; /* size in bytes */ |
| 139 | u32 offset; /* offset in FW file */ | 139 | int32_t offset; /* offset in FW file */ |
| 140 | u32 data_offset; /* offset in ADSP memory space */ | 140 | u32 data_offset; /* offset in ADSP memory space */ |
| 141 | void *data; /* module data */ | 141 | void *data; /* module data */ |
| 142 | }; | 142 | }; |
diff --git a/sound/soc/intel/sst-haswell-ipc.c b/sound/soc/intel/sst-haswell-ipc.c index f46bb4ddde6f..50e4246d4b57 100644 --- a/sound/soc/intel/sst-haswell-ipc.c +++ b/sound/soc/intel/sst-haswell-ipc.c | |||
| @@ -617,7 +617,7 @@ static void hsw_notification_work(struct work_struct *work) | |||
| 617 | case IPC_POSITION_CHANGED: | 617 | case IPC_POSITION_CHANGED: |
| 618 | trace_ipc_notification("DSP stream position changed for", | 618 | trace_ipc_notification("DSP stream position changed for", |
| 619 | stream->reply.stream_hw_id); | 619 | stream->reply.stream_hw_id); |
| 620 | sst_dsp_inbox_read(hsw->dsp, pos, sizeof(pos)); | 620 | sst_dsp_inbox_read(hsw->dsp, pos, sizeof(*pos)); |
| 621 | 621 | ||
| 622 | if (stream->notify_position) | 622 | if (stream->notify_position) |
| 623 | stream->notify_position(stream, stream->pdata); | 623 | stream->notify_position(stream, stream->pdata); |
| @@ -991,7 +991,8 @@ int sst_hsw_stream_get_volume(struct sst_hsw *hsw, struct sst_hsw_stream *stream | |||
| 991 | return -EINVAL; | 991 | return -EINVAL; |
| 992 | 992 | ||
| 993 | sst_dsp_read(hsw->dsp, volume, | 993 | sst_dsp_read(hsw->dsp, volume, |
| 994 | stream->reply.volume_register_address[channel], sizeof(volume)); | 994 | stream->reply.volume_register_address[channel], |
| 995 | sizeof(*volume)); | ||
| 995 | 996 | ||
| 996 | return 0; | 997 | return 0; |
| 997 | } | 998 | } |
| @@ -1609,7 +1610,7 @@ int sst_hsw_dx_set_state(struct sst_hsw *hsw, | |||
| 1609 | trace_ipc_request("PM enter Dx state", state); | 1610 | trace_ipc_request("PM enter Dx state", state); |
| 1610 | 1611 | ||
| 1611 | ret = ipc_tx_message_wait(hsw, header, &state_, sizeof(state_), | 1612 | ret = ipc_tx_message_wait(hsw, header, &state_, sizeof(state_), |
| 1612 | dx, sizeof(dx)); | 1613 | dx, sizeof(*dx)); |
| 1613 | if (ret < 0) { | 1614 | if (ret < 0) { |
| 1614 | dev_err(hsw->dev, "ipc: error set dx state %d failed\n", state); | 1615 | dev_err(hsw->dev, "ipc: error set dx state %d failed\n", state); |
| 1615 | return ret; | 1616 | return ret; |
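
All three sst-haswell-ipc hunks fix the same class of bug: sizeof was applied to a pointer instead of the object it points to, so only 4 or 8 bytes were copied where a whole structure was expected. A standalone illustration (the structure here is made up for the example):

#include <stdio.h>

struct stream_pos {
	unsigned int hw_id;
	unsigned long long position;
};

int main(void)
{
	struct stream_pos p = { 0 };
	struct stream_pos *pos = &p;

	/* sizeof(pos) is the pointer size; sizeof(*pos) is the struct size */
	printf("sizeof(pos)=%zu sizeof(*pos)=%zu\n", sizeof(pos), sizeof(*pos));
	return 0;
}
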
diff --git a/sound/soc/jz4740/Makefile b/sound/soc/jz4740/Makefile index be873c1b0c20..d32c540555c4 100644 --- a/sound/soc/jz4740/Makefile +++ b/sound/soc/jz4740/Makefile | |||
| @@ -1,10 +1,8 @@ | |||
| 1 | # | 1 | # |
| 2 | # Jz4740 Platform Support | 2 | # Jz4740 Platform Support |
| 3 | # | 3 | # |
| 4 | snd-soc-jz4740-objs := jz4740-pcm.o | ||
| 5 | snd-soc-jz4740-i2s-objs := jz4740-i2s.o | 4 | snd-soc-jz4740-i2s-objs := jz4740-i2s.o |
| 6 | 5 | ||
| 7 | obj-$(CONFIG_SND_JZ4740_SOC) += snd-soc-jz4740.o | ||
| 8 | obj-$(CONFIG_SND_JZ4740_SOC_I2S) += snd-soc-jz4740-i2s.o | 6 | obj-$(CONFIG_SND_JZ4740_SOC_I2S) += snd-soc-jz4740-i2s.o |
| 9 | 7 | ||
| 10 | # Jz4740 Machine Support | 8 | # Jz4740 Machine Support |
diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c index 6232b7d307aa..4d0720ed5a90 100644 --- a/sound/soc/sh/rcar/src.c +++ b/sound/soc/sh/rcar/src.c | |||
| @@ -258,7 +258,7 @@ static int rsnd_src_init(struct rsnd_mod *mod, | |||
| 258 | { | 258 | { |
| 259 | struct rsnd_src *src = rsnd_mod_to_src(mod); | 259 | struct rsnd_src *src = rsnd_mod_to_src(mod); |
| 260 | 260 | ||
| 261 | clk_enable(src->clk); | 261 | clk_prepare_enable(src->clk); |
| 262 | 262 | ||
| 263 | return 0; | 263 | return 0; |
| 264 | } | 264 | } |
| @@ -269,7 +269,7 @@ static int rsnd_src_quit(struct rsnd_mod *mod, | |||
| 269 | { | 269 | { |
| 270 | struct rsnd_src *src = rsnd_mod_to_src(mod); | 270 | struct rsnd_src *src = rsnd_mod_to_src(mod); |
| 271 | 271 | ||
| 272 | clk_disable(src->clk); | 272 | clk_disable_unprepare(src->clk); |
| 273 | 273 | ||
| 274 | return 0; | 274 | return 0; |
| 275 | } | 275 | } |
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c index 4b7e20603dd7..1d8387c25bd8 100644 --- a/sound/soc/sh/rcar/ssi.c +++ b/sound/soc/sh/rcar/ssi.c | |||
| @@ -171,7 +171,7 @@ static void rsnd_ssi_hw_start(struct rsnd_ssi *ssi, | |||
| 171 | u32 cr; | 171 | u32 cr; |
| 172 | 172 | ||
| 173 | if (0 == ssi->usrcnt) { | 173 | if (0 == ssi->usrcnt) { |
| 174 | clk_enable(ssi->clk); | 174 | clk_prepare_enable(ssi->clk); |
| 175 | 175 | ||
| 176 | if (rsnd_dai_is_clk_master(rdai)) { | 176 | if (rsnd_dai_is_clk_master(rdai)) { |
| 177 | if (rsnd_ssi_clk_from_parent(ssi)) | 177 | if (rsnd_ssi_clk_from_parent(ssi)) |
| @@ -230,7 +230,7 @@ static void rsnd_ssi_hw_stop(struct rsnd_ssi *ssi, | |||
| 230 | rsnd_ssi_master_clk_stop(ssi); | 230 | rsnd_ssi_master_clk_stop(ssi); |
| 231 | } | 231 | } |
| 232 | 232 | ||
| 233 | clk_disable(ssi->clk); | 233 | clk_disable_unprepare(ssi->clk); |
| 234 | } | 234 | } |
| 235 | 235 | ||
| 236 | dev_dbg(dev, "ssi%d hw stopped\n", rsnd_mod_id(&ssi->mod)); | 236 | dev_dbg(dev, "ssi%d hw stopped\n", rsnd_mod_id(&ssi->mod)); |
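
With the common clock framework a clock has to be prepared (a possibly sleeping step) before it can be enabled, and the rcar SRC/SSI code above switches to the combined helpers that keep the two counts balanced. A short sketch of the pairing, assuming the caller runs in non-atomic context:

#include <linux/clk.h>

static int stream_start(struct clk *clk)
{
	/* clk_prepare_enable() = clk_prepare() + clk_enable(); may sleep */
	return clk_prepare_enable(clk);
}

static void stream_stop(struct clk *clk)
{
	/* balances both the enable and the prepare count from above */
	clk_disable_unprepare(clk);
}
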
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index c8a780d0d057..7769b0a2bc5a 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c | |||
| @@ -254,7 +254,6 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget, | |||
| 254 | static void dapm_kcontrol_free(struct snd_kcontrol *kctl) | 254 | static void dapm_kcontrol_free(struct snd_kcontrol *kctl) |
| 255 | { | 255 | { |
| 256 | struct dapm_kcontrol_data *data = snd_kcontrol_chip(kctl); | 256 | struct dapm_kcontrol_data *data = snd_kcontrol_chip(kctl); |
| 257 | kfree(data->widget); | ||
| 258 | kfree(data->wlist); | 257 | kfree(data->wlist); |
| 259 | kfree(data); | 258 | kfree(data); |
| 260 | } | 259 | } |
diff --git a/tools/lib/api/fs/debugfs.c b/tools/lib/api/fs/debugfs.c index 7c4347962353..a74fba6d7743 100644 --- a/tools/lib/api/fs/debugfs.c +++ b/tools/lib/api/fs/debugfs.c | |||
| @@ -12,8 +12,8 @@ | |||
| 12 | char debugfs_mountpoint[PATH_MAX + 1] = "/sys/kernel/debug"; | 12 | char debugfs_mountpoint[PATH_MAX + 1] = "/sys/kernel/debug"; |
| 13 | 13 | ||
| 14 | static const char * const debugfs_known_mountpoints[] = { | 14 | static const char * const debugfs_known_mountpoints[] = { |
| 15 | "/sys/kernel/debug/", | 15 | "/sys/kernel/debug", |
| 16 | "/debug/", | 16 | "/debug", |
| 17 | 0, | 17 | 0, |
| 18 | }; | 18 | }; |
| 19 | 19 | ||
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c index baec7d887da4..b83184f2d484 100644 --- a/tools/lib/traceevent/event-parse.c +++ b/tools/lib/traceevent/event-parse.c | |||
| @@ -4344,6 +4344,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event | |||
| 4344 | format, len_arg, arg); | 4344 | format, len_arg, arg); |
| 4345 | trace_seq_terminate(&p); | 4345 | trace_seq_terminate(&p); |
| 4346 | trace_seq_puts(s, p.buffer); | 4346 | trace_seq_puts(s, p.buffer); |
| 4347 | trace_seq_destroy(&p); | ||
| 4347 | arg = arg->next; | 4348 | arg = arg->next; |
| 4348 | break; | 4349 | break; |
| 4349 | default: | 4350 | default: |
diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h index 791c539374c7..feab94281634 100644 --- a/tools/lib/traceevent/event-parse.h +++ b/tools/lib/traceevent/event-parse.h | |||
| @@ -876,8 +876,8 @@ struct event_filter { | |||
| 876 | struct event_filter *pevent_filter_alloc(struct pevent *pevent); | 876 | struct event_filter *pevent_filter_alloc(struct pevent *pevent); |
| 877 | 877 | ||
| 878 | /* for backward compatibility */ | 878 | /* for backward compatibility */ |
| 879 | #define FILTER_NONE PEVENT_ERRNO__FILTER_NOT_FOUND | 879 | #define FILTER_NONE PEVENT_ERRNO__NO_FILTER |
| 880 | #define FILTER_NOEXIST PEVENT_ERRNO__NO_FILTER | 880 | #define FILTER_NOEXIST PEVENT_ERRNO__FILTER_NOT_FOUND |
| 881 | #define FILTER_MISS PEVENT_ERRNO__FILTER_MISS | 881 | #define FILTER_MISS PEVENT_ERRNO__FILTER_MISS |
| 882 | #define FILTER_MATCH PEVENT_ERRNO__FILTER_MATCH | 882 | #define FILTER_MATCH PEVENT_ERRNO__FILTER_MATCH |
| 883 | 883 | ||
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index e96923310d57..895edd32930c 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf | |||
| @@ -589,7 +589,7 @@ $(GTK_OBJS): $(OUTPUT)%.o: %.c $(LIB_H) | |||
| 589 | $(QUIET_CC)$(CC) -o $@ -c -fPIC $(CFLAGS) $(GTK_CFLAGS) $< | 589 | $(QUIET_CC)$(CC) -o $@ -c -fPIC $(CFLAGS) $(GTK_CFLAGS) $< |
| 590 | 590 | ||
| 591 | $(OUTPUT)libperf-gtk.so: $(GTK_OBJS) $(PERFLIBS) | 591 | $(OUTPUT)libperf-gtk.so: $(GTK_OBJS) $(PERFLIBS) |
| 592 | $(QUIET_LINK)$(CC) -o $@ -shared $(ALL_LDFLAGS) $(filter %.o,$^) $(GTK_LIBS) | 592 | $(QUIET_LINK)$(CC) -o $@ -shared $(LDFLAGS) $(filter %.o,$^) $(GTK_LIBS) |
| 593 | 593 | ||
| 594 | $(OUTPUT)builtin-help.o: builtin-help.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS | 594 | $(OUTPUT)builtin-help.o: builtin-help.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS |
| 595 | $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) \ | 595 | $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) \ |
diff --git a/tools/perf/arch/x86/tests/dwarf-unwind.c b/tools/perf/arch/x86/tests/dwarf-unwind.c index b602ad93ce63..83bc2385e6d3 100644 --- a/tools/perf/arch/x86/tests/dwarf-unwind.c +++ b/tools/perf/arch/x86/tests/dwarf-unwind.c | |||
| @@ -23,9 +23,10 @@ static int sample_ustack(struct perf_sample *sample, | |||
| 23 | 23 | ||
| 24 | sp = (unsigned long) regs[PERF_REG_X86_SP]; | 24 | sp = (unsigned long) regs[PERF_REG_X86_SP]; |
| 25 | 25 | ||
| 26 | map = map_groups__find(&thread->mg, MAP__FUNCTION, (u64) sp); | 26 | map = map_groups__find(&thread->mg, MAP__VARIABLE, (u64) sp); |
| 27 | if (!map) { | 27 | if (!map) { |
| 28 | pr_debug("failed to get stack map\n"); | 28 | pr_debug("failed to get stack map\n"); |
| 29 | free(buf); | ||
| 29 | return -1; | 30 | return -1; |
| 30 | } | 31 | } |
| 31 | 32 | ||
diff --git a/tools/perf/arch/x86/tests/regs_load.S b/tools/perf/arch/x86/tests/regs_load.S index 99167bf644ea..60875d5c556c 100644 --- a/tools/perf/arch/x86/tests/regs_load.S +++ b/tools/perf/arch/x86/tests/regs_load.S | |||
| @@ -1,4 +1,3 @@ | |||
| 1 | |||
| 2 | #include <linux/linkage.h> | 1 | #include <linux/linkage.h> |
| 3 | 2 | ||
| 4 | #define AX 0 | 3 | #define AX 0 |
| @@ -90,3 +89,10 @@ ENTRY(perf_regs_load) | |||
| 90 | ret | 89 | ret |
| 91 | ENDPROC(perf_regs_load) | 90 | ENDPROC(perf_regs_load) |
| 92 | #endif | 91 | #endif |
| 92 | |||
| 93 | /* | ||
| 94 | * We need to provide a note.GNU-stack section, stating that we do NOT ||
| 95 | * want an executable stack. Otherwise the final link will assume that ||
| 96 | * the ELF stack should not be restricted at all and mark it RWX. ||
| 97 | */ | ||
| 98 | .section .note.GNU-stack,"",@progbits | ||
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile index ee21fa95ebcf..802cf544202b 100644 --- a/tools/perf/config/Makefile +++ b/tools/perf/config/Makefile | |||
| @@ -34,6 +34,14 @@ ifeq ($(ARCH),arm) | |||
| 34 | LIBUNWIND_LIBS = -lunwind -lunwind-arm | 34 | LIBUNWIND_LIBS = -lunwind -lunwind-arm |
| 35 | endif | 35 | endif |
| 36 | 36 | ||
| 37 | # So far there's only x86 libdw unwind support merged in perf. | ||
| 38 | # Disable it on all other architectures in case libdw unwind | ||
| 39 | # support is detected in the system. Add supported architectures | ||
| 40 | # to the check. | ||
| 41 | ifneq ($(ARCH),x86) | ||
| 42 | NO_LIBDW_DWARF_UNWIND := 1 | ||
| 43 | endif | ||
| 44 | |||
| 37 | ifeq ($(LIBUNWIND_LIBS),) | 45 | ifeq ($(LIBUNWIND_LIBS),) |
| 38 | NO_LIBUNWIND := 1 | 46 | NO_LIBUNWIND := 1 |
| 39 | else | 47 | else |
| @@ -109,6 +117,10 @@ CFLAGS += -Wall | |||
| 109 | CFLAGS += -Wextra | 117 | CFLAGS += -Wextra |
| 110 | CFLAGS += -std=gnu99 | 118 | CFLAGS += -std=gnu99 |
| 111 | 119 | ||
| 120 | # Enforce a non-executable stack, as we may regress (again) in the future by | ||
| 121 | # adding assembler files missing the .GNU-stack linker note. | ||
| 122 | LDFLAGS += -Wl,-z,noexecstack | ||
| 123 | |||
| 112 | EXTLIBS = -lelf -lpthread -lrt -lm -ldl | 124 | EXTLIBS = -lelf -lpthread -lrt -lm -ldl |
| 113 | 125 | ||
| 114 | ifneq ($(OUTPUT),) | 126 | ifneq ($(OUTPUT),) |
| @@ -186,7 +198,10 @@ VF_FEATURE_TESTS = \ | |||
| 186 | stackprotector-all \ | 198 | stackprotector-all \ |
| 187 | timerfd \ | 199 | timerfd \ |
| 188 | libunwind-debug-frame \ | 200 | libunwind-debug-frame \ |
| 189 | bionic | 201 | bionic \ |
| 202 | liberty \ | ||
| 203 | liberty-z \ | ||
| 204 | cplus-demangle | ||
| 190 | 205 | ||
| 191 | # Set FEATURE_CHECK_(C|LD)FLAGS-all for all CORE_FEATURE_TESTS features. | 206 | # Set FEATURE_CHECK_(C|LD)FLAGS-all for all CORE_FEATURE_TESTS features. |
| 192 | # If in the future we need per-feature checks/flags for features not | 207 | # If in the future we need per-feature checks/flags for features not |
| @@ -504,7 +519,21 @@ else | |||
| 504 | endif | 519 | endif |
| 505 | 520 | ||
| 506 | ifeq ($(feature-libbfd), 1) | 521 | ifeq ($(feature-libbfd), 1) |
| 507 | EXTLIBS += -lbfd -lz -liberty | 522 | EXTLIBS += -lbfd |
| 523 | |||
| 524 | # call all detections now so we get correct | ||
| 525 | # status in VF output | ||
| 526 | $(call feature_check,liberty) | ||
| 527 | $(call feature_check,liberty-z) | ||
| 528 | $(call feature_check,cplus-demangle) | ||
| 529 | |||
| 530 | ifeq ($(feature-liberty), 1) | ||
| 531 | EXTLIBS += -liberty | ||
| 532 | else | ||
| 533 | ifeq ($(feature-liberty-z), 1) | ||
| 534 | EXTLIBS += -liberty -lz | ||
| 535 | endif | ||
| 536 | endif | ||
| 508 | endif | 537 | endif |
| 509 | 538 | ||
| 510 | ifdef NO_DEMANGLE | 539 | ifdef NO_DEMANGLE |
| @@ -515,15 +544,10 @@ else | |||
| 515 | CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT | 544 | CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT |
| 516 | else | 545 | else |
| 517 | ifneq ($(feature-libbfd), 1) | 546 | ifneq ($(feature-libbfd), 1) |
| 518 | $(call feature_check,liberty) | 547 | ifneq ($(feature-liberty), 1) |
| 519 | ifeq ($(feature-liberty), 1) | 548 | ifneq ($(feature-liberty-z), 1) |
| 520 | EXTLIBS += -lbfd -liberty | 549 | # we have neither HAVE_CPLUS_DEMANGLE_SUPPORT |
| 521 | else | 550 | # nor any of the 'bfd iberty z' trinity |
| 522 | $(call feature_check,liberty-z) | ||
| 523 | ifeq ($(feature-liberty-z), 1) | ||
| 524 | EXTLIBS += -lbfd -liberty -lz | ||
| 525 | else | ||
| 526 | $(call feature_check,cplus-demangle) | ||
| 527 | ifeq ($(feature-cplus-demangle), 1) | 551 | ifeq ($(feature-cplus-demangle), 1) |
| 528 | EXTLIBS += -liberty | 552 | EXTLIBS += -liberty |
| 529 | CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT | 553 | CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT |
diff --git a/tools/perf/tests/make b/tools/perf/tests/make index 5daeae1cb4c0..2f92d6e7ee00 100644 --- a/tools/perf/tests/make +++ b/tools/perf/tests/make | |||
| @@ -46,6 +46,7 @@ make_install_man := install-man | |||
| 46 | make_install_html := install-html | 46 | make_install_html := install-html |
| 47 | make_install_info := install-info | 47 | make_install_info := install-info |
| 48 | make_install_pdf := install-pdf | 48 | make_install_pdf := install-pdf |
| 49 | make_static := LDFLAGS=-static | ||
| 49 | 50 | ||
| 50 | # all the NO_* variable combined | 51 | # all the NO_* variable combined |
| 51 | make_minimal := NO_LIBPERL=1 NO_LIBPYTHON=1 NO_NEWT=1 NO_GTK2=1 | 52 | make_minimal := NO_LIBPERL=1 NO_LIBPYTHON=1 NO_NEWT=1 NO_GTK2=1 |
| @@ -87,6 +88,7 @@ run += make_install_bin | |||
| 87 | # run += make_install_info | 88 | # run += make_install_info |
| 88 | # run += make_install_pdf | 89 | # run += make_install_pdf |
| 89 | run += make_minimal | 90 | run += make_minimal |
| 91 | run += make_static | ||
| 90 | 92 | ||
| 91 | ifneq ($(call has,ctags),) | 93 | ifneq ($(call has,ctags),) |
| 92 | run += make_tags | 94 | run += make_tags |
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index a53cd0b8c151..27c2a5efe450 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c | |||
| @@ -717,7 +717,7 @@ static char *get_kernel_version(const char *root_dir) | |||
| 717 | } | 717 | } |
| 718 | 718 | ||
| 719 | static int map_groups__set_modules_path_dir(struct map_groups *mg, | 719 | static int map_groups__set_modules_path_dir(struct map_groups *mg, |
| 720 | const char *dir_name) | 720 | const char *dir_name, int depth) |
| 721 | { | 721 | { |
| 722 | struct dirent *dent; | 722 | struct dirent *dent; |
| 723 | DIR *dir = opendir(dir_name); | 723 | DIR *dir = opendir(dir_name); |
| @@ -742,7 +742,15 @@ static int map_groups__set_modules_path_dir(struct map_groups *mg, | |||
| 742 | !strcmp(dent->d_name, "..")) | 742 | !strcmp(dent->d_name, "..")) |
| 743 | continue; | 743 | continue; |
| 744 | 744 | ||
| 745 | ret = map_groups__set_modules_path_dir(mg, path); | 745 | /* Do not follow top-level source and build symlinks */ |
| 746 | if (depth == 0) { | ||
| 747 | if (!strcmp(dent->d_name, "source") || | ||
| 748 | !strcmp(dent->d_name, "build")) | ||
| 749 | continue; | ||
| 750 | } | ||
| 751 | |||
| 752 | ret = map_groups__set_modules_path_dir(mg, path, | ||
| 753 | depth + 1); | ||
| 746 | if (ret < 0) | 754 | if (ret < 0) |
| 747 | goto out; | 755 | goto out; |
| 748 | } else { | 756 | } else { |
| @@ -786,11 +794,11 @@ static int machine__set_modules_path(struct machine *machine) | |||
| 786 | if (!version) | 794 | if (!version) |
| 787 | return -1; | 795 | return -1; |
| 788 | 796 | ||
| 789 | snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel", | 797 | snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s", |
| 790 | machine->root_dir, version); | 798 | machine->root_dir, version); |
| 791 | free(version); | 799 | free(version); |
| 792 | 800 | ||
| 793 | return map_groups__set_modules_path_dir(&machine->kmaps, modules_path); | 801 | return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0); |
| 794 | } | 802 | } |
| 795 | 803 | ||
| 796 | static int machine__create_module(void *arg, const char *name, u64 start) | 804 | static int machine__create_module(void *arg, const char *name, u64 start) |
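
The machine.c change threads a depth counter through the recursion so that the source and build entries directly under /lib/modules/<version> — symlinks back into the kernel source and build trees — are not followed now that the scan starts one level higher than before. A userspace sketch of the same skip-at-depth-zero idea (path handling simplified, names assumed):

#include <dirent.h>
#include <stdio.h>
#include <string.h>

static int walk_modules(const char *dir_name, int depth)
{
	DIR *dir = opendir(dir_name);
	struct dirent *dent;

	if (!dir)
		return -1;

	while ((dent = readdir(dir)) != NULL) {
		char path[4096];

		if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, ".."))
			continue;

		/* only the top level holds the source/build symlinks */
		if (depth == 0 && (!strcmp(dent->d_name, "source") ||
				   !strcmp(dent->d_name, "build")))
			continue;

		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (dent->d_type == DT_DIR)
			walk_modules(path, depth + 1);
		else
			printf("%s\n", path);	/* e.g. record a .ko path */
	}

	closedir(dir);
	return 0;
}
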
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index 47b29834a6b6..56ff9bebb577 100644 --- a/virt/kvm/arm/vgic.c +++ b/virt/kvm/arm/vgic.c | |||
| @@ -548,11 +548,10 @@ static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu, | |||
| 548 | u32 val; | 548 | u32 val; |
| 549 | u32 *reg; | 549 | u32 *reg; |
| 550 | 550 | ||
| 551 | offset >>= 1; | ||
| 552 | reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg, | 551 | reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg, |
| 553 | vcpu->vcpu_id, offset); | 552 | vcpu->vcpu_id, offset >> 1); |
| 554 | 553 | ||
| 555 | if (offset & 2) | 554 | if (offset & 4) |
| 556 | val = *reg >> 16; | 555 | val = *reg >> 16; |
| 557 | else | 556 | else |
| 558 | val = *reg & 0xffff; | 557 | val = *reg & 0xffff; |
| @@ -561,13 +560,13 @@ static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu, | |||
| 561 | vgic_reg_access(mmio, &val, offset, | 560 | vgic_reg_access(mmio, &val, offset, |
| 562 | ACCESS_READ_VALUE | ACCESS_WRITE_VALUE); | 561 | ACCESS_READ_VALUE | ACCESS_WRITE_VALUE); |
| 563 | if (mmio->is_write) { | 562 | if (mmio->is_write) { |
| 564 | if (offset < 4) { | 563 | if (offset < 8) { |
| 565 | *reg = ~0U; /* Force PPIs/SGIs to 1 */ | 564 | *reg = ~0U; /* Force PPIs/SGIs to 1 */ |
| 566 | return false; | 565 | return false; |
| 567 | } | 566 | } |
| 568 | 567 | ||
| 569 | val = vgic_cfg_compress(val); | 568 | val = vgic_cfg_compress(val); |
| 570 | if (offset & 2) { | 569 | if (offset & 4) { |
| 571 | *reg &= 0xffff; | 570 | *reg &= 0xffff; |
| 572 | *reg |= val << 16; | 571 | *reg |= val << 16; |
| 573 | } else { | 572 | } else { |
| @@ -916,6 +915,7 @@ static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg) | |||
| 916 | case 0: | 915 | case 0: |
| 917 | if (!target_cpus) | 916 | if (!target_cpus) |
| 918 | return; | 917 | return; |
| 918 | break; | ||
| 919 | 919 | ||
| 920 | case 1: | 920 | case 1: |
| 921 | target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff; | 921 | target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff; |
| @@ -1667,10 +1667,11 @@ static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr, | |||
| 1667 | if (addr + size < addr) | 1667 | if (addr + size < addr) |
| 1668 | return -EINVAL; | 1668 | return -EINVAL; |
| 1669 | 1669 | ||
| 1670 | *ioaddr = addr; | ||
| 1670 | ret = vgic_ioaddr_overlap(kvm); | 1671 | ret = vgic_ioaddr_overlap(kvm); |
| 1671 | if (ret) | 1672 | if (ret) |
| 1672 | return ret; | 1673 | *ioaddr = VGIC_ADDR_UNDEF; |
| 1673 | *ioaddr = addr; | 1674 | |
| 1674 | return ret; | 1675 | return ret; |
| 1675 | } | 1676 | } |
| 1676 | 1677 | ||
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c index 8db43701016f..bf06577fea51 100644 --- a/virt/kvm/assigned-dev.c +++ b/virt/kvm/assigned-dev.c | |||
| @@ -395,7 +395,8 @@ static int assigned_device_enable_host_msix(struct kvm *kvm, | |||
| 395 | if (dev->entries_nr == 0) | 395 | if (dev->entries_nr == 0) |
| 396 | return r; | 396 | return r; |
| 397 | 397 | ||
| 398 | r = pci_enable_msix(dev->dev, dev->host_msix_entries, dev->entries_nr); | 398 | r = pci_enable_msix_exact(dev->dev, |
| 399 | dev->host_msix_entries, dev->entries_nr); | ||
| 399 | if (r) | 400 | if (r) |
| 400 | return r; | 401 | return r; |
| 401 | 402 | ||
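
The assigned-device code above moves to pci_enable_msix_exact(), which either allocates exactly the requested number of MSI-X vectors or fails with a negative errno; the old pci_enable_msix() could also return a positive "fewer vectors available" count that this caller never handled. A sketch of the call, with the device and entry array standing in for the real assigned-dev fields:

#include <linux/pci.h>

static int enable_host_msix(struct pci_dev *pdev,
			    struct msix_entry *entries, int nvec)
{
	int r;

	/* success means exactly nvec vectors; anything else is an error */
	r = pci_enable_msix_exact(pdev, entries, nvec);
	if (r)
		return r;

	return 0;
}
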
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c index 10df100c4514..06e6401d6ef4 100644 --- a/virt/kvm/async_pf.c +++ b/virt/kvm/async_pf.c | |||
| @@ -101,7 +101,7 @@ static void async_pf_execute(struct work_struct *work) | |||
| 101 | if (waitqueue_active(&vcpu->wq)) | 101 | if (waitqueue_active(&vcpu->wq)) |
| 102 | wake_up_interruptible(&vcpu->wq); | 102 | wake_up_interruptible(&vcpu->wq); |
| 103 | 103 | ||
| 104 | mmdrop(mm); | 104 | mmput(mm); |
| 105 | kvm_put_kvm(vcpu->kvm); | 105 | kvm_put_kvm(vcpu->kvm); |
| 106 | } | 106 | } |
| 107 | 107 | ||
| @@ -118,7 +118,7 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu) | |||
| 118 | flush_work(&work->work); | 118 | flush_work(&work->work); |
| 119 | #else | 119 | #else |
| 120 | if (cancel_work_sync(&work->work)) { | 120 | if (cancel_work_sync(&work->work)) { |
| 121 | mmdrop(work->mm); | 121 | mmput(work->mm); |
| 122 | kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */ | 122 | kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */ |
| 123 | kmem_cache_free(async_pf_cache, work); | 123 | kmem_cache_free(async_pf_cache, work); |
| 124 | } | 124 | } |
| @@ -183,7 +183,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva, | |||
| 183 | work->addr = hva; | 183 | work->addr = hva; |
| 184 | work->arch = *arch; | 184 | work->arch = *arch; |
| 185 | work->mm = current->mm; | 185 | work->mm = current->mm; |
| 186 | atomic_inc(&work->mm->mm_count); | 186 | atomic_inc(&work->mm->mm_users); |
| 187 | kvm_get_kvm(work->vcpu->kvm); | 187 | kvm_get_kvm(work->vcpu->kvm); |
| 188 | 188 | ||
| 189 | /* this can't really happen otherwise gfn_to_pfn_async | 189 | /* this can't really happen otherwise gfn_to_pfn_async |
| @@ -201,7 +201,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva, | |||
| 201 | return 1; | 201 | return 1; |
| 202 | retry_sync: | 202 | retry_sync: |
| 203 | kvm_put_kvm(work->vcpu->kvm); | 203 | kvm_put_kvm(work->vcpu->kvm); |
| 204 | mmdrop(work->mm); | 204 | mmput(work->mm); |
| 205 | kmem_cache_free(async_pf_cache, work); | 205 | kmem_cache_free(async_pf_cache, work); |
| 206 | return 0; | 206 | return 0; |
| 207 | } | 207 | } |
