diff options
author | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2018-03-20 06:27:18 -0400 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2018-03-20 06:27:18 -0400 |
commit | 4958134df54c2c84e9c22ea042761d439164d26e (patch) | |
tree | 503177afab11f7d25b12a84ce25b481d305c51ba | |
parent | c4f528795d1add8b63652673f7262729f679c6c1 (diff) | |
parent | c698ca5278934c0ae32297a8725ced2e27585d7f (diff) |
Merge 4.16-rc6 into tty-next
We want the serial/tty fixes in here as well.
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
746 files changed, 7575 insertions, 4876 deletions
diff --git a/Documentation/PCI/pci.txt b/Documentation/PCI/pci.txt index 611a75e4366e..badb26ac33dc 100644 --- a/Documentation/PCI/pci.txt +++ b/Documentation/PCI/pci.txt | |||
@@ -570,7 +570,9 @@ your driver if they're helpful, or just use plain hex constants. | |||
570 | The device IDs are arbitrary hex numbers (vendor controlled) and normally used | 570 | The device IDs are arbitrary hex numbers (vendor controlled) and normally used |
571 | only in a single location, the pci_device_id table. | 571 | only in a single location, the pci_device_id table. |
572 | 572 | ||
573 | Please DO submit new vendor/device IDs to http://pciids.sourceforge.net/. | 573 | Please DO submit new vendor/device IDs to http://pci-ids.ucw.cz/. |
574 | There are mirrors of the pci.ids file at http://pciids.sourceforge.net/ | ||
575 | and https://github.com/pciutils/pciids. | ||
574 | 576 | ||
575 | 577 | ||
576 | 578 | ||
diff --git a/Documentation/accelerators/ocxl.rst b/Documentation/accelerators/ocxl.rst index 4f7af841d935..ddcc58d01cfb 100644 --- a/Documentation/accelerators/ocxl.rst +++ b/Documentation/accelerators/ocxl.rst | |||
@@ -152,6 +152,11 @@ OCXL_IOCTL_IRQ_SET_FD: | |||
152 | Associate an event fd to an AFU interrupt so that the user process | 152 | Associate an event fd to an AFU interrupt so that the user process |
153 | can be notified when the AFU sends an interrupt. | 153 | can be notified when the AFU sends an interrupt. |
154 | 154 | ||
155 | OCXL_IOCTL_GET_METADATA: | ||
156 | |||
157 | Obtains configuration information from the card, such as the size of | ||
158 | MMIO areas, the AFU version, and the PASID for the current context. | ||
159 | |||
155 | 160 | ||
156 | mmap | 161 | mmap |
157 | ---- | 162 | ---- |
diff --git a/Documentation/devicetree/bindings/misc/arm-charlcd.txt b/Documentation/devicetree/bindings/auxdisplay/arm-charlcd.txt index e28e2aac47f1..e28e2aac47f1 100644 --- a/Documentation/devicetree/bindings/misc/arm-charlcd.txt +++ b/Documentation/devicetree/bindings/auxdisplay/arm-charlcd.txt | |||
diff --git a/Documentation/devicetree/bindings/dma/mv-xor-v2.txt b/Documentation/devicetree/bindings/dma/mv-xor-v2.txt index 217a90eaabe7..9c38bbe7e6d7 100644 --- a/Documentation/devicetree/bindings/dma/mv-xor-v2.txt +++ b/Documentation/devicetree/bindings/dma/mv-xor-v2.txt | |||
@@ -11,7 +11,11 @@ Required properties: | |||
11 | interrupts. | 11 | interrupts. |
12 | 12 | ||
13 | Optional properties: | 13 | Optional properties: |
14 | - clocks: Optional reference to the clock used by the XOR engine. | 14 | - clocks: Optional reference to the clocks used by the XOR engine. |
15 | - clock-names: mandatory if there is a second clock, in this case the | ||
16 | name must be "core" for the first clock and "reg" for the second | ||
17 | one | ||
18 | |||
15 | 19 | ||
16 | Example: | 20 | Example: |
17 | 21 | ||
diff --git a/Documentation/devicetree/bindings/eeprom/at24.txt b/Documentation/devicetree/bindings/eeprom/at24.txt index 1812c848e369..abfae1beca2b 100644 --- a/Documentation/devicetree/bindings/eeprom/at24.txt +++ b/Documentation/devicetree/bindings/eeprom/at24.txt | |||
@@ -38,9 +38,9 @@ Required properties: | |||
38 | 38 | ||
39 | "catalyst", | 39 | "catalyst", |
40 | "microchip", | 40 | "microchip", |
41 | "nxp", | ||
41 | "ramtron", | 42 | "ramtron", |
42 | "renesas", | 43 | "renesas", |
43 | "nxp", | ||
44 | "st", | 44 | "st", |
45 | 45 | ||
46 | Some vendors use different model names for chips which are just | 46 | Some vendors use different model names for chips which are just |
diff --git a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt index 33c9a10fdc91..20f121daa910 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt +++ b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt | |||
@@ -14,6 +14,7 @@ Required properties: | |||
14 | - "renesas,irqc-r8a7794" (R-Car E2) | 14 | - "renesas,irqc-r8a7794" (R-Car E2) |
15 | - "renesas,intc-ex-r8a7795" (R-Car H3) | 15 | - "renesas,intc-ex-r8a7795" (R-Car H3) |
16 | - "renesas,intc-ex-r8a7796" (R-Car M3-W) | 16 | - "renesas,intc-ex-r8a7796" (R-Car M3-W) |
17 | - "renesas,intc-ex-r8a77965" (R-Car M3-N) | ||
17 | - "renesas,intc-ex-r8a77970" (R-Car V3M) | 18 | - "renesas,intc-ex-r8a77970" (R-Car V3M) |
18 | - "renesas,intc-ex-r8a77995" (R-Car D3) | 19 | - "renesas,intc-ex-r8a77995" (R-Car D3) |
19 | - #interrupt-cells: has to be <2>: an interrupt index and flags, as defined in | 20 | - #interrupt-cells: has to be <2>: an interrupt index and flags, as defined in |
diff --git a/Documentation/devicetree/bindings/net/renesas,ravb.txt b/Documentation/devicetree/bindings/net/renesas,ravb.txt index c902261893b9..92fd4b2f17b2 100644 --- a/Documentation/devicetree/bindings/net/renesas,ravb.txt +++ b/Documentation/devicetree/bindings/net/renesas,ravb.txt | |||
@@ -18,6 +18,7 @@ Required properties: | |||
18 | - "renesas,etheravb-r8a7795" for the R8A7795 SoC. | 18 | - "renesas,etheravb-r8a7795" for the R8A7795 SoC. |
19 | - "renesas,etheravb-r8a7796" for the R8A7796 SoC. | 19 | - "renesas,etheravb-r8a7796" for the R8A7796 SoC. |
20 | - "renesas,etheravb-r8a77970" for the R8A77970 SoC. | 20 | - "renesas,etheravb-r8a77970" for the R8A77970 SoC. |
21 | - "renesas,etheravb-r8a77980" for the R8A77980 SoC. | ||
21 | - "renesas,etheravb-r8a77995" for the R8A77995 SoC. | 22 | - "renesas,etheravb-r8a77995" for the R8A77995 SoC. |
22 | - "renesas,etheravb-rcar-gen3" as a fallback for the above | 23 | - "renesas,etheravb-rcar-gen3" as a fallback for the above |
23 | R-Car Gen3 devices. | 24 | R-Car Gen3 devices. |
diff --git a/Documentation/devicetree/bindings/power/wakeup-source.txt b/Documentation/devicetree/bindings/power/wakeup-source.txt index 3c81f78b5c27..5d254ab13ebf 100644 --- a/Documentation/devicetree/bindings/power/wakeup-source.txt +++ b/Documentation/devicetree/bindings/power/wakeup-source.txt | |||
@@ -60,7 +60,7 @@ Examples | |||
60 | #size-cells = <0>; | 60 | #size-cells = <0>; |
61 | 61 | ||
62 | button@1 { | 62 | button@1 { |
63 | debounce_interval = <50>; | 63 | debounce-interval = <50>; |
64 | wakeup-source; | 64 | wakeup-source; |
65 | linux,code = <116>; | 65 | linux,code = <116>; |
66 | label = "POWER"; | 66 | label = "POWER"; |
diff --git a/Documentation/devicetree/bindings/thermal/imx-thermal.txt b/Documentation/devicetree/bindings/thermal/imx-thermal.txt index 28be51afdb6a..379eb763073e 100644 --- a/Documentation/devicetree/bindings/thermal/imx-thermal.txt +++ b/Documentation/devicetree/bindings/thermal/imx-thermal.txt | |||
@@ -22,7 +22,32 @@ Optional properties: | |||
22 | - clocks : thermal sensor's clock source. | 22 | - clocks : thermal sensor's clock source. |
23 | 23 | ||
24 | Example: | 24 | Example: |
25 | ocotp: ocotp@21bc000 { | ||
26 | #address-cells = <1>; | ||
27 | #size-cells = <1>; | ||
28 | compatible = "fsl,imx6sx-ocotp", "syscon"; | ||
29 | reg = <0x021bc000 0x4000>; | ||
30 | clocks = <&clks IMX6SX_CLK_OCOTP>; | ||
25 | 31 | ||
32 | tempmon_calib: calib@38 { | ||
33 | reg = <0x38 4>; | ||
34 | }; | ||
35 | |||
36 | tempmon_temp_grade: temp-grade@20 { | ||
37 | reg = <0x20 4>; | ||
38 | }; | ||
39 | }; | ||
40 | |||
41 | tempmon: tempmon { | ||
42 | compatible = "fsl,imx6sx-tempmon", "fsl,imx6q-tempmon"; | ||
43 | interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>; | ||
44 | fsl,tempmon = <&anatop>; | ||
45 | nvmem-cells = <&tempmon_calib>, <&tempmon_temp_grade>; | ||
46 | nvmem-cell-names = "calib", "temp_grade"; | ||
47 | clocks = <&clks IMX6SX_CLK_PLL3_USB_OTG>; | ||
48 | }; | ||
49 | |||
50 | Legacy method (Deprecated): | ||
26 | tempmon { | 51 | tempmon { |
27 | compatible = "fsl,imx6q-tempmon"; | 52 | compatible = "fsl,imx6q-tempmon"; |
28 | fsl,tempmon = <&anatop>; | 53 | fsl,tempmon = <&anatop>; |
diff --git a/Documentation/devicetree/bindings/usb/dwc2.txt b/Documentation/devicetree/bindings/usb/dwc2.txt index e64d903bcbe8..46da5f184460 100644 --- a/Documentation/devicetree/bindings/usb/dwc2.txt +++ b/Documentation/devicetree/bindings/usb/dwc2.txt | |||
@@ -19,7 +19,7 @@ Required properties: | |||
19 | configured in FS mode; | 19 | configured in FS mode; |
20 | - "st,stm32f4x9-hsotg": The DWC2 USB HS controller instance in STM32F4x9 SoCs | 20 | - "st,stm32f4x9-hsotg": The DWC2 USB HS controller instance in STM32F4x9 SoCs |
21 | configured in HS mode; | 21 | configured in HS mode; |
22 | - "st,stm32f7xx-hsotg": The DWC2 USB HS controller instance in STM32F7xx SoCs | 22 | - "st,stm32f7-hsotg": The DWC2 USB HS controller instance in STM32F7 SoCs |
23 | configured in HS mode; | 23 | configured in HS mode; |
24 | - reg : Should contain 1 register range (address and length) | 24 | - reg : Should contain 1 register range (address and length) |
25 | - interrupts : Should contain 1 interrupt | 25 | - interrupts : Should contain 1 interrupt |
diff --git a/Documentation/devicetree/bindings/usb/renesas_usb3.txt b/Documentation/devicetree/bindings/usb/renesas_usb3.txt index 87a45e2f9b7f..2c071bb5801e 100644 --- a/Documentation/devicetree/bindings/usb/renesas_usb3.txt +++ b/Documentation/devicetree/bindings/usb/renesas_usb3.txt | |||
@@ -4,6 +4,7 @@ Required properties: | |||
4 | - compatible: Must contain one of the following: | 4 | - compatible: Must contain one of the following: |
5 | - "renesas,r8a7795-usb3-peri" | 5 | - "renesas,r8a7795-usb3-peri" |
6 | - "renesas,r8a7796-usb3-peri" | 6 | - "renesas,r8a7796-usb3-peri" |
7 | - "renesas,r8a77965-usb3-peri" | ||
7 | - "renesas,rcar-gen3-usb3-peri" for a generic R-Car Gen3 compatible | 8 | - "renesas,rcar-gen3-usb3-peri" for a generic R-Car Gen3 compatible |
8 | device | 9 | device |
9 | 10 | ||
diff --git a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt index d060172f1529..43960faf5a88 100644 --- a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt +++ b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt | |||
@@ -12,6 +12,7 @@ Required properties: | |||
12 | - "renesas,usbhs-r8a7794" for r8a7794 (R-Car E2) compatible device | 12 | - "renesas,usbhs-r8a7794" for r8a7794 (R-Car E2) compatible device |
13 | - "renesas,usbhs-r8a7795" for r8a7795 (R-Car H3) compatible device | 13 | - "renesas,usbhs-r8a7795" for r8a7795 (R-Car H3) compatible device |
14 | - "renesas,usbhs-r8a7796" for r8a7796 (R-Car M3-W) compatible device | 14 | - "renesas,usbhs-r8a7796" for r8a7796 (R-Car M3-W) compatible device |
15 | - "renesas,usbhs-r8a77965" for r8a77965 (R-Car M3-N) compatible device | ||
15 | - "renesas,usbhs-r8a77995" for r8a77995 (R-Car D3) compatible device | 16 | - "renesas,usbhs-r8a77995" for r8a77995 (R-Car D3) compatible device |
16 | - "renesas,usbhs-r7s72100" for r7s72100 (RZ/A1) compatible device | 17 | - "renesas,usbhs-r7s72100" for r7s72100 (RZ/A1) compatible device |
17 | - "renesas,rcar-gen2-usbhs" for R-Car Gen2 or RZ/G1 compatible devices | 18 | - "renesas,rcar-gen2-usbhs" for R-Car Gen2 or RZ/G1 compatible devices |
diff --git a/Documentation/devicetree/bindings/usb/usb-xhci.txt b/Documentation/devicetree/bindings/usb/usb-xhci.txt index e2ea59bbca93..1651483a7048 100644 --- a/Documentation/devicetree/bindings/usb/usb-xhci.txt +++ b/Documentation/devicetree/bindings/usb/usb-xhci.txt | |||
@@ -13,6 +13,7 @@ Required properties: | |||
13 | - "renesas,xhci-r8a7793" for r8a7793 SoC | 13 | - "renesas,xhci-r8a7793" for r8a7793 SoC |
14 | - "renesas,xhci-r8a7795" for r8a7795 SoC | 14 | - "renesas,xhci-r8a7795" for r8a7795 SoC |
15 | - "renesas,xhci-r8a7796" for r8a7796 SoC | 15 | - "renesas,xhci-r8a7796" for r8a7796 SoC |
16 | - "renesas,xhci-r8a77965" for r8a77965 SoC | ||
16 | - "renesas,rcar-gen2-xhci" for a generic R-Car Gen2 or RZ/G1 compatible | 17 | - "renesas,rcar-gen2-xhci" for a generic R-Car Gen2 or RZ/G1 compatible |
17 | device | 18 | device |
18 | - "renesas,rcar-gen3-xhci" for a generic R-Car Gen3 compatible device | 19 | - "renesas,rcar-gen3-xhci" for a generic R-Car Gen3 compatible device |
diff --git a/Documentation/ia64/serial.txt b/Documentation/ia64/serial.txt index 6869c73de4e2..a63d2c54329b 100644 --- a/Documentation/ia64/serial.txt +++ b/Documentation/ia64/serial.txt | |||
@@ -111,7 +111,7 @@ TROUBLESHOOTING SERIAL CONSOLE PROBLEMS | |||
111 | 111 | ||
112 | - If you don't have an HCDP, the kernel doesn't know where | 112 | - If you don't have an HCDP, the kernel doesn't know where |
113 | your console lives until the driver discovers serial | 113 | your console lives until the driver discovers serial |
114 | devices. Use "console=uart, io,0x3f8" (or appropriate | 114 | devices. Use "console=uart,io,0x3f8" (or appropriate |
115 | address for your machine). | 115 | address for your machine). |
116 | 116 | ||
117 | Kernel and init script output works fine, but no "login:" prompt: | 117 | Kernel and init script output works fine, but no "login:" prompt: |
diff --git a/Documentation/media/dmx.h.rst.exceptions b/Documentation/media/dmx.h.rst.exceptions index 63f55a9ae2b1..a8c4239ed95b 100644 --- a/Documentation/media/dmx.h.rst.exceptions +++ b/Documentation/media/dmx.h.rst.exceptions | |||
@@ -50,9 +50,15 @@ replace typedef dmx_filter_t :c:type:`dmx_filter` | |||
50 | replace typedef dmx_pes_type_t :c:type:`dmx_pes_type` | 50 | replace typedef dmx_pes_type_t :c:type:`dmx_pes_type` |
51 | replace typedef dmx_input_t :c:type:`dmx_input` | 51 | replace typedef dmx_input_t :c:type:`dmx_input` |
52 | 52 | ||
53 | ignore symbol DMX_OUT_DECODER | 53 | replace symbol DMX_BUFFER_FLAG_HAD_CRC32_DISCARD :c:type:`dmx_buffer_flags` |
54 | ignore symbol DMX_OUT_TAP | 54 | replace symbol DMX_BUFFER_FLAG_TEI :c:type:`dmx_buffer_flags` |
55 | ignore symbol DMX_OUT_TS_TAP | 55 | replace symbol DMX_BUFFER_PKT_COUNTER_MISMATCH :c:type:`dmx_buffer_flags` |
56 | ignore symbol DMX_OUT_TSDEMUX_TAP | 56 | replace symbol DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED :c:type:`dmx_buffer_flags` |
57 | replace symbol DMX_BUFFER_FLAG_DISCONTINUITY_INDICATOR :c:type:`dmx_buffer_flags` | ||
58 | |||
59 | replace symbol DMX_OUT_DECODER :c:type:`dmx_output` | ||
60 | replace symbol DMX_OUT_TAP :c:type:`dmx_output` | ||
61 | replace symbol DMX_OUT_TS_TAP :c:type:`dmx_output` | ||
62 | replace symbol DMX_OUT_TSDEMUX_TAP :c:type:`dmx_output` | ||
57 | 63 | ||
58 | replace ioctl DMX_DQBUF dmx_qbuf | 64 | replace ioctl DMX_DQBUF dmx_qbuf |
diff --git a/Documentation/media/uapi/dvb/dmx-qbuf.rst b/Documentation/media/uapi/dvb/dmx-qbuf.rst index b48c4931658e..be5a4c6f1904 100644 --- a/Documentation/media/uapi/dvb/dmx-qbuf.rst +++ b/Documentation/media/uapi/dvb/dmx-qbuf.rst | |||
@@ -51,9 +51,10 @@ out to disk. Buffers remain locked until dequeued, until the | |||
51 | the device is closed. | 51 | the device is closed. |
52 | 52 | ||
53 | Applications call the ``DMX_DQBUF`` ioctl to dequeue a filled | 53 | Applications call the ``DMX_DQBUF`` ioctl to dequeue a filled |
54 | (capturing) buffer from the driver's outgoing queue. They just set the ``reserved`` field array to zero. When ``DMX_DQBUF`` is called with a | 54 | (capturing) buffer from the driver's outgoing queue. |
55 | pointer to this structure, the driver fills the remaining fields or | 55 | They just set the ``index`` field with the buffer ID to be queued. |
56 | returns an error code. | 56 | When ``DMX_DQBUF`` is called with a pointer to struct :c:type:`dmx_buffer`, |
57 | the driver fills the remaining fields or returns an error code. | ||
57 | 58 | ||
58 | By default ``DMX_DQBUF`` blocks when no buffer is in the outgoing | 59 | By default ``DMX_DQBUF`` blocks when no buffer is in the outgoing |
59 | queue. When the ``O_NONBLOCK`` flag was given to the | 60 | queue. When the ``O_NONBLOCK`` flag was given to the |
diff --git a/Documentation/sphinx/kerneldoc.py b/Documentation/sphinx/kerneldoc.py index 39aa9e8697cc..fbedcc39460b 100644 --- a/Documentation/sphinx/kerneldoc.py +++ b/Documentation/sphinx/kerneldoc.py | |||
@@ -36,8 +36,7 @@ import glob | |||
36 | 36 | ||
37 | from docutils import nodes, statemachine | 37 | from docutils import nodes, statemachine |
38 | from docutils.statemachine import ViewList | 38 | from docutils.statemachine import ViewList |
39 | from docutils.parsers.rst import directives | 39 | from docutils.parsers.rst import directives, Directive |
40 | from sphinx.util.compat import Directive | ||
41 | from sphinx.ext.autodoc import AutodocReporter | 40 | from sphinx.ext.autodoc import AutodocReporter |
42 | 41 | ||
43 | __version__ = '1.0' | 42 | __version__ = '1.0' |
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 792fa8717d13..d6b3ff51a14f 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt | |||
@@ -123,14 +123,15 @@ memory layout to fit in user mode), check KVM_CAP_MIPS_VZ and use the | |||
123 | flag KVM_VM_MIPS_VZ. | 123 | flag KVM_VM_MIPS_VZ. |
124 | 124 | ||
125 | 125 | ||
126 | 4.3 KVM_GET_MSR_INDEX_LIST | 126 | 4.3 KVM_GET_MSR_INDEX_LIST, KVM_GET_MSR_FEATURE_INDEX_LIST |
127 | 127 | ||
128 | Capability: basic | 128 | Capability: basic, KVM_CAP_GET_MSR_FEATURES for KVM_GET_MSR_FEATURE_INDEX_LIST |
129 | Architectures: x86 | 129 | Architectures: x86 |
130 | Type: system | 130 | Type: system ioctl |
131 | Parameters: struct kvm_msr_list (in/out) | 131 | Parameters: struct kvm_msr_list (in/out) |
132 | Returns: 0 on success; -1 on error | 132 | Returns: 0 on success; -1 on error |
133 | Errors: | 133 | Errors: |
134 | EFAULT: the msr index list cannot be read from or written to | ||
134 | E2BIG: the msr index list is to be to fit in the array specified by | 135 | E2BIG: the msr index list is to be to fit in the array specified by |
135 | the user. | 136 | the user. |
136 | 137 | ||
@@ -139,16 +140,23 @@ struct kvm_msr_list { | |||
139 | __u32 indices[0]; | 140 | __u32 indices[0]; |
140 | }; | 141 | }; |
141 | 142 | ||
142 | This ioctl returns the guest msrs that are supported. The list varies | 143 | The user fills in the size of the indices array in nmsrs, and in return |
143 | by kvm version and host processor, but does not change otherwise. The | 144 | kvm adjusts nmsrs to reflect the actual number of msrs and fills in the |
144 | user fills in the size of the indices array in nmsrs, and in return | 145 | indices array with their numbers. |
145 | kvm adjusts nmsrs to reflect the actual number of msrs and fills in | 146 | |
146 | the indices array with their numbers. | 147 | KVM_GET_MSR_INDEX_LIST returns the guest msrs that are supported. The list |
148 | varies by kvm version and host processor, but does not change otherwise. | ||
147 | 149 | ||
148 | Note: if kvm indicates supports MCE (KVM_CAP_MCE), then the MCE bank MSRs are | 150 | Note: if kvm indicates supports MCE (KVM_CAP_MCE), then the MCE bank MSRs are |
149 | not returned in the MSR list, as different vcpus can have a different number | 151 | not returned in the MSR list, as different vcpus can have a different number |
150 | of banks, as set via the KVM_X86_SETUP_MCE ioctl. | 152 | of banks, as set via the KVM_X86_SETUP_MCE ioctl. |
151 | 153 | ||
154 | KVM_GET_MSR_FEATURE_INDEX_LIST returns the list of MSRs that can be passed | ||
155 | to the KVM_GET_MSRS system ioctl. This lets userspace probe host capabilities | ||
156 | and processor features that are exposed via MSRs (e.g., VMX capabilities). | ||
157 | This list also varies by kvm version and host processor, but does not change | ||
158 | otherwise. | ||
159 | |||
152 | 160 | ||
153 | 4.4 KVM_CHECK_EXTENSION | 161 | 4.4 KVM_CHECK_EXTENSION |
154 | 162 | ||
@@ -475,14 +483,22 @@ Support for this has been removed. Use KVM_SET_GUEST_DEBUG instead. | |||
475 | 483 | ||
476 | 4.18 KVM_GET_MSRS | 484 | 4.18 KVM_GET_MSRS |
477 | 485 | ||
478 | Capability: basic | 486 | Capability: basic (vcpu), KVM_CAP_GET_MSR_FEATURES (system) |
479 | Architectures: x86 | 487 | Architectures: x86 |
480 | Type: vcpu ioctl | 488 | Type: system ioctl, vcpu ioctl |
481 | Parameters: struct kvm_msrs (in/out) | 489 | Parameters: struct kvm_msrs (in/out) |
482 | Returns: 0 on success, -1 on error | 490 | Returns: number of msrs successfully returned; |
491 | -1 on error | ||
492 | |||
493 | When used as a system ioctl: | ||
494 | Reads the values of MSR-based features that are available for the VM. This | ||
495 | is similar to KVM_GET_SUPPORTED_CPUID, but it returns MSR indices and values. | ||
496 | The list of msr-based features can be obtained using KVM_GET_MSR_FEATURE_INDEX_LIST | ||
497 | in a system ioctl. | ||
483 | 498 | ||
499 | When used as a vcpu ioctl: | ||
484 | Reads model-specific registers from the vcpu. Supported msr indices can | 500 | Reads model-specific registers from the vcpu. Supported msr indices can |
485 | be obtained using KVM_GET_MSR_INDEX_LIST. | 501 | be obtained using KVM_GET_MSR_INDEX_LIST in a system ioctl. |
486 | 502 | ||
487 | struct kvm_msrs { | 503 | struct kvm_msrs { |
488 | __u32 nmsrs; /* number of msrs in entries */ | 504 | __u32 nmsrs; /* number of msrs in entries */ |
diff --git a/Documentation/virtual/kvm/cpuid.txt b/Documentation/virtual/kvm/cpuid.txt index dcab6dc11e3b..87a7506f31c2 100644 --- a/Documentation/virtual/kvm/cpuid.txt +++ b/Documentation/virtual/kvm/cpuid.txt | |||
@@ -58,6 +58,10 @@ KVM_FEATURE_PV_TLB_FLUSH || 9 || guest checks this feature bit | |||
58 | || || before enabling paravirtualized | 58 | || || before enabling paravirtualized |
59 | || || tlb flush. | 59 | || || tlb flush. |
60 | ------------------------------------------------------------------------------ | 60 | ------------------------------------------------------------------------------ |
61 | KVM_FEATURE_ASYNC_PF_VMEXIT || 10 || paravirtualized async PF VM exit | ||
62 | || || can be enabled by setting bit 2 | ||
63 | || || when writing to msr 0x4b564d02 | ||
64 | ------------------------------------------------------------------------------ | ||
61 | KVM_FEATURE_CLOCKSOURCE_STABLE_BIT || 24 || host will warn if no guest-side | 65 | KVM_FEATURE_CLOCKSOURCE_STABLE_BIT || 24 || host will warn if no guest-side |
62 | || || per-cpu warps are expected in | 66 | || || per-cpu warps are expected in |
63 | || || kvmclock. | 67 | || || kvmclock. |
diff --git a/Documentation/virtual/kvm/msr.txt b/Documentation/virtual/kvm/msr.txt index 1ebecc115dc6..f3f0d57ced8e 100644 --- a/Documentation/virtual/kvm/msr.txt +++ b/Documentation/virtual/kvm/msr.txt | |||
@@ -170,7 +170,8 @@ MSR_KVM_ASYNC_PF_EN: 0x4b564d02 | |||
170 | when asynchronous page faults are enabled on the vcpu 0 when | 170 | when asynchronous page faults are enabled on the vcpu 0 when |
171 | disabled. Bit 1 is 1 if asynchronous page faults can be injected | 171 | disabled. Bit 1 is 1 if asynchronous page faults can be injected |
172 | when vcpu is in cpl == 0. Bit 2 is 1 if asynchronous page faults | 172 | when vcpu is in cpl == 0. Bit 2 is 1 if asynchronous page faults |
173 | are delivered to L1 as #PF vmexits. | 173 | are delivered to L1 as #PF vmexits. Bit 2 can be set only if |
174 | KVM_FEATURE_ASYNC_PF_VMEXIT is present in CPUID. | ||
174 | 175 | ||
175 | First 4 byte of 64 byte memory location will be written to by | 176 | First 4 byte of 64 byte memory location will be written to by |
176 | the hypervisor at the time of asynchronous page fault (APF) | 177 | the hypervisor at the time of asynchronous page fault (APF) |
diff --git a/Documentation/x86/intel_rdt_ui.txt b/Documentation/x86/intel_rdt_ui.txt index 756fd76b78a6..71c30984e94d 100644 --- a/Documentation/x86/intel_rdt_ui.txt +++ b/Documentation/x86/intel_rdt_ui.txt | |||
@@ -671,7 +671,7 @@ occupancy of the real time threads on these cores. | |||
671 | # mkdir p1 | 671 | # mkdir p1 |
672 | 672 | ||
673 | Move the cpus 4-7 over to p1 | 673 | Move the cpus 4-7 over to p1 |
674 | # echo f0 > p0/cpus | 674 | # echo f0 > p1/cpus |
675 | 675 | ||
676 | View the llc occupancy snapshot | 676 | View the llc occupancy snapshot |
677 | 677 | ||
diff --git a/MAINTAINERS b/MAINTAINERS index 93a12af4f180..205c8fc12a9c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -1238,7 +1238,7 @@ F: drivers/clk/at91 | |||
1238 | 1238 | ||
1239 | ARM/ATMEL AT91RM9200, AT91SAM9 AND SAMA5 SOC SUPPORT | 1239 | ARM/ATMEL AT91RM9200, AT91SAM9 AND SAMA5 SOC SUPPORT |
1240 | M: Nicolas Ferre <nicolas.ferre@microchip.com> | 1240 | M: Nicolas Ferre <nicolas.ferre@microchip.com> |
1241 | M: Alexandre Belloni <alexandre.belloni@free-electrons.com> | 1241 | M: Alexandre Belloni <alexandre.belloni@bootlin.com> |
1242 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 1242 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
1243 | W: http://www.linux4sam.org | 1243 | W: http://www.linux4sam.org |
1244 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/nferre/linux-at91.git | 1244 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/nferre/linux-at91.git |
@@ -1590,7 +1590,7 @@ ARM/Marvell Dove/MV78xx0/Orion SOC support | |||
1590 | M: Jason Cooper <jason@lakedaemon.net> | 1590 | M: Jason Cooper <jason@lakedaemon.net> |
1591 | M: Andrew Lunn <andrew@lunn.ch> | 1591 | M: Andrew Lunn <andrew@lunn.ch> |
1592 | M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> | 1592 | M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> |
1593 | M: Gregory Clement <gregory.clement@free-electrons.com> | 1593 | M: Gregory Clement <gregory.clement@bootlin.com> |
1594 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 1594 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
1595 | S: Maintained | 1595 | S: Maintained |
1596 | F: Documentation/devicetree/bindings/soc/dove/ | 1596 | F: Documentation/devicetree/bindings/soc/dove/ |
@@ -1604,7 +1604,7 @@ F: arch/arm/boot/dts/orion5x* | |||
1604 | ARM/Marvell Kirkwood and Armada 370, 375, 38x, 39x, XP, 3700, 7K/8K SOC support | 1604 | ARM/Marvell Kirkwood and Armada 370, 375, 38x, 39x, XP, 3700, 7K/8K SOC support |
1605 | M: Jason Cooper <jason@lakedaemon.net> | 1605 | M: Jason Cooper <jason@lakedaemon.net> |
1606 | M: Andrew Lunn <andrew@lunn.ch> | 1606 | M: Andrew Lunn <andrew@lunn.ch> |
1607 | M: Gregory Clement <gregory.clement@free-electrons.com> | 1607 | M: Gregory Clement <gregory.clement@bootlin.com> |
1608 | M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> | 1608 | M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> |
1609 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 1609 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
1610 | S: Maintained | 1610 | S: Maintained |
@@ -1999,8 +1999,10 @@ M: Maxime Coquelin <mcoquelin.stm32@gmail.com> | |||
1999 | M: Alexandre Torgue <alexandre.torgue@st.com> | 1999 | M: Alexandre Torgue <alexandre.torgue@st.com> |
2000 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 2000 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
2001 | S: Maintained | 2001 | S: Maintained |
2002 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mcoquelin/stm32.git | 2002 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/atorgue/stm32.git stm32-next |
2003 | N: stm32 | 2003 | N: stm32 |
2004 | F: arch/arm/boot/dts/stm32* | ||
2005 | F: arch/arm/mach-stm32/ | ||
2004 | F: drivers/clocksource/armv7m_systick.c | 2006 | F: drivers/clocksource/armv7m_systick.c |
2005 | 2007 | ||
2006 | ARM/TANGO ARCHITECTURE | 2008 | ARM/TANGO ARCHITECTURE |
@@ -7600,8 +7602,10 @@ F: mm/kasan/ | |||
7600 | F: scripts/Makefile.kasan | 7602 | F: scripts/Makefile.kasan |
7601 | 7603 | ||
7602 | KCONFIG | 7604 | KCONFIG |
7605 | M: Masahiro Yamada <yamada.masahiro@socionext.com> | ||
7606 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild.git kconfig | ||
7603 | L: linux-kbuild@vger.kernel.org | 7607 | L: linux-kbuild@vger.kernel.org |
7604 | S: Orphan | 7608 | S: Maintained |
7605 | F: Documentation/kbuild/kconfig-language.txt | 7609 | F: Documentation/kbuild/kconfig-language.txt |
7606 | F: scripts/kconfig/ | 7610 | F: scripts/kconfig/ |
7607 | 7611 | ||
@@ -9921,6 +9925,13 @@ F: Documentation/ABI/stable/sysfs-bus-nvmem | |||
9921 | F: include/linux/nvmem-consumer.h | 9925 | F: include/linux/nvmem-consumer.h |
9922 | F: include/linux/nvmem-provider.h | 9926 | F: include/linux/nvmem-provider.h |
9923 | 9927 | ||
9928 | NXP SGTL5000 DRIVER | ||
9929 | M: Fabio Estevam <fabio.estevam@nxp.com> | ||
9930 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) | ||
9931 | S: Maintained | ||
9932 | F: Documentation/devicetree/bindings/sound/sgtl5000.txt | ||
9933 | F: sound/soc/codecs/sgtl5000* | ||
9934 | |||
9924 | NXP TDA998X DRM DRIVER | 9935 | NXP TDA998X DRM DRIVER |
9925 | M: Russell King <linux@armlinux.org.uk> | 9936 | M: Russell King <linux@armlinux.org.uk> |
9926 | S: Supported | 9937 | S: Supported |
@@ -10926,6 +10937,17 @@ L: linux-gpio@vger.kernel.org | |||
10926 | S: Supported | 10937 | S: Supported |
10927 | F: drivers/pinctrl/pinctrl-at91-pio4.* | 10938 | F: drivers/pinctrl/pinctrl-at91-pio4.* |
10928 | 10939 | ||
10940 | PIN CONTROLLER - FREESCALE | ||
10941 | M: Dong Aisheng <aisheng.dong@nxp.com> | ||
10942 | M: Fabio Estevam <festevam@gmail.com> | ||
10943 | M: Shawn Guo <shawnguo@kernel.org> | ||
10944 | M: Stefan Agner <stefan@agner.ch> | ||
10945 | R: Pengutronix Kernel Team <kernel@pengutronix.de> | ||
10946 | L: linux-gpio@vger.kernel.org | ||
10947 | S: Maintained | ||
10948 | F: drivers/pinctrl/freescale/ | ||
10949 | F: Documentation/devicetree/bindings/pinctrl/fsl,* | ||
10950 | |||
10929 | PIN CONTROLLER - INTEL | 10951 | PIN CONTROLLER - INTEL |
10930 | M: Mika Westerberg <mika.westerberg@linux.intel.com> | 10952 | M: Mika Westerberg <mika.westerberg@linux.intel.com> |
10931 | M: Heikki Krogerus <heikki.krogerus@linux.intel.com> | 10953 | M: Heikki Krogerus <heikki.krogerus@linux.intel.com> |
@@ -12092,6 +12114,7 @@ M: Sylwester Nawrocki <s.nawrocki@samsung.com> | |||
12092 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) | 12114 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) |
12093 | S: Supported | 12115 | S: Supported |
12094 | F: sound/soc/samsung/ | 12116 | F: sound/soc/samsung/ |
12117 | F: Documentation/devicetree/bindings/sound/samsung* | ||
12095 | 12118 | ||
12096 | SAMSUNG EXYNOS PSEUDO RANDOM NUMBER GENERATOR (RNG) DRIVER | 12119 | SAMSUNG EXYNOS PSEUDO RANDOM NUMBER GENERATOR (RNG) DRIVER |
12097 | M: Krzysztof Kozlowski <krzk@kernel.org> | 12120 | M: Krzysztof Kozlowski <krzk@kernel.org> |
@@ -2,7 +2,7 @@ | |||
2 | VERSION = 4 | 2 | VERSION = 4 |
3 | PATCHLEVEL = 16 | 3 | PATCHLEVEL = 16 |
4 | SUBLEVEL = 0 | 4 | SUBLEVEL = 0 |
5 | EXTRAVERSION = -rc3 | 5 | EXTRAVERSION = -rc6 |
6 | NAME = Fearless Coyote | 6 | NAME = Fearless Coyote |
7 | 7 | ||
8 | # *DOCUMENTATION* | 8 | # *DOCUMENTATION* |
@@ -388,7 +388,7 @@ PYTHON = python | |||
388 | CHECK = sparse | 388 | CHECK = sparse |
389 | 389 | ||
390 | CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \ | 390 | CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \ |
391 | -Wbitwise -Wno-return-void $(CF) | 391 | -Wbitwise -Wno-return-void -Wno-unknown-attribute $(CF) |
392 | NOSTDINC_FLAGS = | 392 | NOSTDINC_FLAGS = |
393 | CFLAGS_MODULE = | 393 | CFLAGS_MODULE = |
394 | AFLAGS_MODULE = | 394 | AFLAGS_MODULE = |
@@ -489,6 +489,11 @@ KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) | |||
489 | KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) | 489 | KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) |
490 | endif | 490 | endif |
491 | 491 | ||
492 | RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register | ||
493 | RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk | ||
494 | RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG))) | ||
495 | export RETPOLINE_CFLAGS | ||
496 | |||
492 | ifeq ($(config-targets),1) | 497 | ifeq ($(config-targets),1) |
493 | # =========================================================================== | 498 | # =========================================================================== |
494 | # *config targets only - make sure prerequisites are updated, and descend | 499 | # *config targets only - make sure prerequisites are updated, and descend |
@@ -579,10 +584,9 @@ ifeq ($(KBUILD_EXTMOD),) | |||
579 | # To avoid any implicit rule to kick in, define an empty command | 584 | # To avoid any implicit rule to kick in, define an empty command |
580 | $(KCONFIG_CONFIG) include/config/auto.conf.cmd: ; | 585 | $(KCONFIG_CONFIG) include/config/auto.conf.cmd: ; |
581 | 586 | ||
582 | # If .config is newer than include/config/auto.conf, someone tinkered | 587 | # The actual configuration files used during the build are stored in |
583 | # with it and forgot to run make oldconfig. | 588 | # include/generated/ and include/config/. Update them if .config is newer than |
584 | # if auto.conf.cmd is missing then we are probably in a cleaned tree so | 589 | # include/config/auto.conf (which mirrors .config). |
585 | # we execute the config step to be sure to catch updated Kconfig files | ||
586 | include/config/%.conf: $(KCONFIG_CONFIG) include/config/auto.conf.cmd | 590 | include/config/%.conf: $(KCONFIG_CONFIG) include/config/auto.conf.cmd |
587 | $(Q)$(MAKE) -f $(srctree)/Makefile silentoldconfig | 591 | $(Q)$(MAKE) -f $(srctree)/Makefile silentoldconfig |
588 | else | 592 | else |
@@ -857,8 +861,7 @@ KBUILD_AFLAGS += $(ARCH_AFLAGS) $(KAFLAGS) | |||
857 | KBUILD_CFLAGS += $(ARCH_CFLAGS) $(KCFLAGS) | 861 | KBUILD_CFLAGS += $(ARCH_CFLAGS) $(KCFLAGS) |
858 | 862 | ||
859 | # Use --build-id when available. | 863 | # Use --build-id when available. |
860 | LDFLAGS_BUILD_ID := $(patsubst -Wl$(comma)%,%,\ | 864 | LDFLAGS_BUILD_ID := $(call ld-option, --build-id) |
861 | $(call cc-ldoption, -Wl$(comma)--build-id,)) | ||
862 | KBUILD_LDFLAGS_MODULE += $(LDFLAGS_BUILD_ID) | 865 | KBUILD_LDFLAGS_MODULE += $(LDFLAGS_BUILD_ID) |
863 | LDFLAGS_vmlinux += $(LDFLAGS_BUILD_ID) | 866 | LDFLAGS_vmlinux += $(LDFLAGS_BUILD_ID) |
864 | 867 | ||
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index f3a80cf164cc..d76bf4a83740 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig | |||
@@ -484,7 +484,6 @@ config ARC_CURR_IN_REG | |||
484 | 484 | ||
485 | config ARC_EMUL_UNALIGNED | 485 | config ARC_EMUL_UNALIGNED |
486 | bool "Emulate unaligned memory access (userspace only)" | 486 | bool "Emulate unaligned memory access (userspace only)" |
487 | default N | ||
488 | select SYSCTL_ARCH_UNALIGN_NO_WARN | 487 | select SYSCTL_ARCH_UNALIGN_NO_WARN |
489 | select SYSCTL_ARCH_UNALIGN_ALLOW | 488 | select SYSCTL_ARCH_UNALIGN_ALLOW |
490 | depends on ISA_ARCOMPACT | 489 | depends on ISA_ARCOMPACT |
diff --git a/arch/arc/boot/dts/axs101.dts b/arch/arc/boot/dts/axs101.dts index 70aec7d6ca60..626b694c7be7 100644 --- a/arch/arc/boot/dts/axs101.dts +++ b/arch/arc/boot/dts/axs101.dts | |||
@@ -17,6 +17,6 @@ | |||
17 | compatible = "snps,axs101", "snps,arc-sdp"; | 17 | compatible = "snps,axs101", "snps,arc-sdp"; |
18 | 18 | ||
19 | chosen { | 19 | chosen { |
20 | bootargs = "earlycon=uart8250,mmio32,0xe0022000,115200n8 console=tty0 console=ttyS3,115200n8 consoleblank=0 video=1280x720@60"; | 20 | bootargs = "earlycon=uart8250,mmio32,0xe0022000,115200n8 console=tty0 console=ttyS3,115200n8 consoleblank=0 video=1280x720@60 print-fatal-signals=1"; |
21 | }; | 21 | }; |
22 | }; | 22 | }; |
diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi index 74d070cd3c13..47b74fbc403c 100644 --- a/arch/arc/boot/dts/axs10x_mb.dtsi +++ b/arch/arc/boot/dts/axs10x_mb.dtsi | |||
@@ -214,13 +214,13 @@ | |||
214 | }; | 214 | }; |
215 | 215 | ||
216 | eeprom@0x54{ | 216 | eeprom@0x54{ |
217 | compatible = "24c01"; | 217 | compatible = "atmel,24c01"; |
218 | reg = <0x54>; | 218 | reg = <0x54>; |
219 | pagesize = <0x8>; | 219 | pagesize = <0x8>; |
220 | }; | 220 | }; |
221 | 221 | ||
222 | eeprom@0x57{ | 222 | eeprom@0x57{ |
223 | compatible = "24c04"; | 223 | compatible = "atmel,24c04"; |
224 | reg = <0x57>; | 224 | reg = <0x57>; |
225 | pagesize = <0x8>; | 225 | pagesize = <0x8>; |
226 | }; | 226 | }; |
diff --git a/arch/arc/boot/dts/haps_hs_idu.dts b/arch/arc/boot/dts/haps_hs_idu.dts index 215cddd0b63b..0c603308aeb3 100644 --- a/arch/arc/boot/dts/haps_hs_idu.dts +++ b/arch/arc/boot/dts/haps_hs_idu.dts | |||
@@ -22,7 +22,7 @@ | |||
22 | }; | 22 | }; |
23 | 23 | ||
24 | chosen { | 24 | chosen { |
25 | bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=ttyS0,115200n8 debug"; | 25 | bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=ttyS0,115200n8 debug print-fatal-signals=1"; |
26 | }; | 26 | }; |
27 | 27 | ||
28 | aliases { | 28 | aliases { |
diff --git a/arch/arc/boot/dts/nsim_700.dts b/arch/arc/boot/dts/nsim_700.dts index 5ee96b067c08..ff2f2c70c545 100644 --- a/arch/arc/boot/dts/nsim_700.dts +++ b/arch/arc/boot/dts/nsim_700.dts | |||
@@ -17,7 +17,7 @@ | |||
17 | interrupt-parent = <&core_intc>; | 17 | interrupt-parent = <&core_intc>; |
18 | 18 | ||
19 | chosen { | 19 | chosen { |
20 | bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8"; | 20 | bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8 print-fatal-signals=1"; |
21 | }; | 21 | }; |
22 | 22 | ||
23 | aliases { | 23 | aliases { |
diff --git a/arch/arc/boot/dts/nsim_hs.dts b/arch/arc/boot/dts/nsim_hs.dts index 8d787b251f73..8e2489b16b0a 100644 --- a/arch/arc/boot/dts/nsim_hs.dts +++ b/arch/arc/boot/dts/nsim_hs.dts | |||
@@ -24,7 +24,7 @@ | |||
24 | }; | 24 | }; |
25 | 25 | ||
26 | chosen { | 26 | chosen { |
27 | bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8"; | 27 | bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8 print-fatal-signals=1"; |
28 | }; | 28 | }; |
29 | 29 | ||
30 | aliases { | 30 | aliases { |
diff --git a/arch/arc/boot/dts/nsim_hs_idu.dts b/arch/arc/boot/dts/nsim_hs_idu.dts index 4f98ebf71fd8..ed12f494721d 100644 --- a/arch/arc/boot/dts/nsim_hs_idu.dts +++ b/arch/arc/boot/dts/nsim_hs_idu.dts | |||
@@ -15,7 +15,7 @@ | |||
15 | interrupt-parent = <&core_intc>; | 15 | interrupt-parent = <&core_intc>; |
16 | 16 | ||
17 | chosen { | 17 | chosen { |
18 | bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8"; | 18 | bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8 print-fatal-signals=1"; |
19 | }; | 19 | }; |
20 | 20 | ||
21 | aliases { | 21 | aliases { |
diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts index 3c391ba565ed..7842e5eb4ab5 100644 --- a/arch/arc/boot/dts/nsimosci.dts +++ b/arch/arc/boot/dts/nsimosci.dts | |||
@@ -20,7 +20,7 @@ | |||
20 | /* this is for console on PGU */ | 20 | /* this is for console on PGU */ |
21 | /* bootargs = "console=tty0 consoleblank=0"; */ | 21 | /* bootargs = "console=tty0 consoleblank=0"; */ |
22 | /* this is for console on serial */ | 22 | /* this is for console on serial */ |
23 | bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug video=640x480-24"; | 23 | bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug video=640x480-24 print-fatal-signals=1"; |
24 | }; | 24 | }; |
25 | 25 | ||
26 | aliases { | 26 | aliases { |
diff --git a/arch/arc/boot/dts/nsimosci_hs.dts b/arch/arc/boot/dts/nsimosci_hs.dts index 14a727cbf4c9..b8838cf2b4ec 100644 --- a/arch/arc/boot/dts/nsimosci_hs.dts +++ b/arch/arc/boot/dts/nsimosci_hs.dts | |||
@@ -20,7 +20,7 @@ | |||
20 | /* this is for console on PGU */ | 20 | /* this is for console on PGU */ |
21 | /* bootargs = "console=tty0 consoleblank=0"; */ | 21 | /* bootargs = "console=tty0 consoleblank=0"; */ |
22 | /* this is for console on serial */ | 22 | /* this is for console on serial */ |
23 | bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug video=640x480-24"; | 23 | bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug video=640x480-24 print-fatal-signals=1"; |
24 | }; | 24 | }; |
25 | 25 | ||
26 | aliases { | 26 | aliases { |
diff --git a/arch/arc/boot/dts/nsimosci_hs_idu.dts b/arch/arc/boot/dts/nsimosci_hs_idu.dts index 5052917d4a99..72a2c723f1f7 100644 --- a/arch/arc/boot/dts/nsimosci_hs_idu.dts +++ b/arch/arc/boot/dts/nsimosci_hs_idu.dts | |||
@@ -18,7 +18,7 @@ | |||
18 | 18 | ||
19 | chosen { | 19 | chosen { |
20 | /* this is for console on serial */ | 20 | /* this is for console on serial */ |
21 | bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblan=0 debug video=640x480-24"; | 21 | bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblan=0 debug video=640x480-24 print-fatal-signals=1"; |
22 | }; | 22 | }; |
23 | 23 | ||
24 | aliases { | 24 | aliases { |
diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h index 257a68f3c2fe..309f4e6721b3 100644 --- a/arch/arc/include/asm/entry-arcv2.h +++ b/arch/arc/include/asm/entry-arcv2.h | |||
@@ -184,7 +184,7 @@ | |||
184 | .macro FAKE_RET_FROM_EXCPN | 184 | .macro FAKE_RET_FROM_EXCPN |
185 | lr r9, [status32] | 185 | lr r9, [status32] |
186 | bic r9, r9, (STATUS_U_MASK|STATUS_DE_MASK|STATUS_AE_MASK) | 186 | bic r9, r9, (STATUS_U_MASK|STATUS_DE_MASK|STATUS_AE_MASK) |
187 | or r9, r9, (STATUS_L_MASK|STATUS_IE_MASK) | 187 | or r9, r9, STATUS_IE_MASK |
188 | kflag r9 | 188 | kflag r9 |
189 | .endm | 189 | .endm |
190 | 190 | ||
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c index f61a52b01625..5fe84e481654 100644 --- a/arch/arc/kernel/mcip.c +++ b/arch/arc/kernel/mcip.c | |||
@@ -22,10 +22,79 @@ static DEFINE_RAW_SPINLOCK(mcip_lock); | |||
22 | 22 | ||
23 | static char smp_cpuinfo_buf[128]; | 23 | static char smp_cpuinfo_buf[128]; |
24 | 24 | ||
25 | /* | ||
26 | * Set mask to halt GFRC if any online core in SMP cluster is halted. | ||
27 | * Only works for ARC HS v3.0+, on earlier versions has no effect. | ||
28 | */ | ||
29 | static void mcip_update_gfrc_halt_mask(int cpu) | ||
30 | { | ||
31 | struct bcr_generic gfrc; | ||
32 | unsigned long flags; | ||
33 | u32 gfrc_halt_mask; | ||
34 | |||
35 | READ_BCR(ARC_REG_GFRC_BUILD, gfrc); | ||
36 | |||
37 | /* | ||
38 | * CMD_GFRC_SET_CORE and CMD_GFRC_READ_CORE commands were added in | ||
39 | * GFRC 0x3 version. | ||
40 | */ | ||
41 | if (gfrc.ver < 0x3) | ||
42 | return; | ||
43 | |||
44 | raw_spin_lock_irqsave(&mcip_lock, flags); | ||
45 | |||
46 | __mcip_cmd(CMD_GFRC_READ_CORE, 0); | ||
47 | gfrc_halt_mask = read_aux_reg(ARC_REG_MCIP_READBACK); | ||
48 | gfrc_halt_mask |= BIT(cpu); | ||
49 | __mcip_cmd_data(CMD_GFRC_SET_CORE, 0, gfrc_halt_mask); | ||
50 | |||
51 | raw_spin_unlock_irqrestore(&mcip_lock, flags); | ||
52 | } | ||
53 | |||
54 | static void mcip_update_debug_halt_mask(int cpu) | ||
55 | { | ||
56 | u32 mcip_mask = 0; | ||
57 | unsigned long flags; | ||
58 | |||
59 | raw_spin_lock_irqsave(&mcip_lock, flags); | ||
60 | |||
61 | /* | ||
62 | * mcip_mask is same for CMD_DEBUG_SET_SELECT and CMD_DEBUG_SET_MASK | ||
63 | * commands. So read it once instead of reading both CMD_DEBUG_READ_MASK | ||
64 | * and CMD_DEBUG_READ_SELECT. | ||
65 | */ | ||
66 | __mcip_cmd(CMD_DEBUG_READ_SELECT, 0); | ||
67 | mcip_mask = read_aux_reg(ARC_REG_MCIP_READBACK); | ||
68 | |||
69 | mcip_mask |= BIT(cpu); | ||
70 | |||
71 | __mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, mcip_mask); | ||
72 | /* | ||
73 | * Parameter specified halt cause: | ||
74 | * STATUS32[H]/actionpoint/breakpoint/self-halt | ||
75 | * We choose all of them (0xF). | ||
76 | */ | ||
77 | __mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xF, mcip_mask); | ||
78 | |||
79 | raw_spin_unlock_irqrestore(&mcip_lock, flags); | ||
80 | } | ||
81 | |||
25 | static void mcip_setup_per_cpu(int cpu) | 82 | static void mcip_setup_per_cpu(int cpu) |
26 | { | 83 | { |
84 | struct mcip_bcr mp; | ||
85 | |||
86 | READ_BCR(ARC_REG_MCIP_BCR, mp); | ||
87 | |||
27 | smp_ipi_irq_setup(cpu, IPI_IRQ); | 88 | smp_ipi_irq_setup(cpu, IPI_IRQ); |
28 | smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ); | 89 | smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ); |
90 | |||
91 | /* Update GFRC halt mask as new CPU came online */ | ||
92 | if (mp.gfrc) | ||
93 | mcip_update_gfrc_halt_mask(cpu); | ||
94 | |||
95 | /* Update MCIP debug mask as new CPU came online */ | ||
96 | if (mp.dbg) | ||
97 | mcip_update_debug_halt_mask(cpu); | ||
29 | } | 98 | } |
30 | 99 | ||
31 | static void mcip_ipi_send(int cpu) | 100 | static void mcip_ipi_send(int cpu) |
@@ -101,11 +170,6 @@ static void mcip_probe_n_setup(void) | |||
101 | IS_AVAIL1(mp.gfrc, "GFRC")); | 170 | IS_AVAIL1(mp.gfrc, "GFRC")); |
102 | 171 | ||
103 | cpuinfo_arc700[0].extn.gfrc = mp.gfrc; | 172 | cpuinfo_arc700[0].extn.gfrc = mp.gfrc; |
104 | |||
105 | if (mp.dbg) { | ||
106 | __mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, 0xf); | ||
107 | __mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xf, 0xf); | ||
108 | } | ||
109 | } | 173 | } |
110 | 174 | ||
111 | struct plat_smp_ops plat_smp_ops = { | 175 | struct plat_smp_ops plat_smp_ops = { |
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index ec12fe1c2f07..b2cae79a25d7 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c | |||
@@ -51,7 +51,7 @@ static const struct id_to_str arc_cpu_rel[] = { | |||
51 | { 0x51, "R2.0" }, | 51 | { 0x51, "R2.0" }, |
52 | { 0x52, "R2.1" }, | 52 | { 0x52, "R2.1" }, |
53 | { 0x53, "R3.0" }, | 53 | { 0x53, "R3.0" }, |
54 | { 0x54, "R4.0" }, | 54 | { 0x54, "R3.10a" }, |
55 | #endif | 55 | #endif |
56 | { 0x00, NULL } | 56 | { 0x00, NULL } |
57 | }; | 57 | }; |
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c index efe8b4200a67..21d86c36692b 100644 --- a/arch/arc/kernel/smp.c +++ b/arch/arc/kernel/smp.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/reboot.h> | 24 | #include <linux/reboot.h> |
25 | #include <linux/irqdomain.h> | 25 | #include <linux/irqdomain.h> |
26 | #include <linux/export.h> | 26 | #include <linux/export.h> |
27 | #include <linux/of_fdt.h> | ||
27 | 28 | ||
28 | #include <asm/processor.h> | 29 | #include <asm/processor.h> |
29 | #include <asm/setup.h> | 30 | #include <asm/setup.h> |
@@ -47,6 +48,42 @@ void __init smp_prepare_boot_cpu(void) | |||
47 | { | 48 | { |
48 | } | 49 | } |
49 | 50 | ||
51 | static int __init arc_get_cpu_map(const char *name, struct cpumask *cpumask) | ||
52 | { | ||
53 | unsigned long dt_root = of_get_flat_dt_root(); | ||
54 | const char *buf; | ||
55 | |||
56 | buf = of_get_flat_dt_prop(dt_root, name, NULL); | ||
57 | if (!buf) | ||
58 | return -EINVAL; | ||
59 | |||
60 | if (cpulist_parse(buf, cpumask)) | ||
61 | return -EINVAL; | ||
62 | |||
63 | return 0; | ||
64 | } | ||
65 | |||
66 | /* | ||
67 | * Read from DeviceTree and setup cpu possible mask. If there is no | ||
68 | * "possible-cpus" property in DeviceTree pretend all [0..NR_CPUS-1] exist. | ||
69 | */ | ||
70 | static void __init arc_init_cpu_possible(void) | ||
71 | { | ||
72 | struct cpumask cpumask; | ||
73 | |||
74 | if (arc_get_cpu_map("possible-cpus", &cpumask)) { | ||
75 | pr_warn("Failed to get possible-cpus from dtb, pretending all %u cpus exist\n", | ||
76 | NR_CPUS); | ||
77 | |||
78 | cpumask_setall(&cpumask); | ||
79 | } | ||
80 | |||
81 | if (!cpumask_test_cpu(0, &cpumask)) | ||
82 | panic("Master cpu (cpu[0]) is missed in cpu possible mask!"); | ||
83 | |||
84 | init_cpu_possible(&cpumask); | ||
85 | } | ||
86 | |||
50 | /* | 87 | /* |
51 | * Called from setup_arch() before calling setup_processor() | 88 | * Called from setup_arch() before calling setup_processor() |
52 | * | 89 | * |
@@ -58,10 +95,7 @@ void __init smp_prepare_boot_cpu(void) | |||
58 | */ | 95 | */ |
59 | void __init smp_init_cpus(void) | 96 | void __init smp_init_cpus(void) |
60 | { | 97 | { |
61 | unsigned int i; | 98 | arc_init_cpu_possible(); |
62 | |||
63 | for (i = 0; i < NR_CPUS; i++) | ||
64 | set_cpu_possible(i, true); | ||
65 | 99 | ||
66 | if (plat_smp_ops.init_early_smp) | 100 | if (plat_smp_ops.init_early_smp) |
67 | plat_smp_ops.init_early_smp(); | 101 | plat_smp_ops.init_early_smp(); |
@@ -70,16 +104,12 @@ void __init smp_init_cpus(void) | |||
70 | /* called from init ( ) => process 1 */ | 104 | /* called from init ( ) => process 1 */ |
71 | void __init smp_prepare_cpus(unsigned int max_cpus) | 105 | void __init smp_prepare_cpus(unsigned int max_cpus) |
72 | { | 106 | { |
73 | int i; | ||
74 | |||
75 | /* | 107 | /* |
76 | * if platform didn't set the present map already, do it now | 108 | * if platform didn't set the present map already, do it now |
77 | * boot cpu is set to present already by init/main.c | 109 | * boot cpu is set to present already by init/main.c |
78 | */ | 110 | */ |
79 | if (num_present_cpus() <= 1) { | 111 | if (num_present_cpus() <= 1) |
80 | for (i = 0; i < max_cpus; i++) | 112 | init_cpu_present(cpu_possible_mask); |
81 | set_cpu_present(i, true); | ||
82 | } | ||
83 | } | 113 | } |
84 | 114 | ||
85 | void __init smp_cpus_done(unsigned int max_cpus) | 115 | void __init smp_cpus_done(unsigned int max_cpus) |
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c index eee924dfffa6..2072f3451e9c 100644 --- a/arch/arc/mm/cache.c +++ b/arch/arc/mm/cache.c | |||
@@ -780,7 +780,10 @@ noinline static void slc_entire_op(const int op) | |||
780 | 780 | ||
781 | write_aux_reg(r, ctrl); | 781 | write_aux_reg(r, ctrl); |
782 | 782 | ||
783 | write_aux_reg(ARC_REG_SLC_INVALIDATE, 1); | 783 | if (op & OP_INV) /* Inv or flush-n-inv use same cmd reg */ |
784 | write_aux_reg(ARC_REG_SLC_INVALIDATE, 0x1); | ||
785 | else | ||
786 | write_aux_reg(ARC_REG_SLC_FLUSH, 0x1); | ||
784 | 787 | ||
785 | /* Make sure "busy" bit reports correct stataus, see STAR 9001165532 */ | 788 | /* Make sure "busy" bit reports correct stataus, see STAR 9001165532 */ |
786 | read_aux_reg(r); | 789 | read_aux_reg(r); |
diff --git a/arch/arm/boot/dts/bcm11351.dtsi b/arch/arm/boot/dts/bcm11351.dtsi index 18045c38bcf1..db7cded1b7ad 100644 --- a/arch/arm/boot/dts/bcm11351.dtsi +++ b/arch/arm/boot/dts/bcm11351.dtsi | |||
@@ -55,7 +55,7 @@ | |||
55 | <0x3ff00100 0x100>; | 55 | <0x3ff00100 0x100>; |
56 | }; | 56 | }; |
57 | 57 | ||
58 | smc@0x3404c000 { | 58 | smc@3404c000 { |
59 | compatible = "brcm,bcm11351-smc", "brcm,kona-smc"; | 59 | compatible = "brcm,bcm11351-smc", "brcm,kona-smc"; |
60 | reg = <0x3404c000 0x400>; /* 1 KiB in SRAM */ | 60 | reg = <0x3404c000 0x400>; /* 1 KiB in SRAM */ |
61 | }; | 61 | }; |
diff --git a/arch/arm/boot/dts/bcm21664.dtsi b/arch/arm/boot/dts/bcm21664.dtsi index 6dde95f21cef..266f2611dc22 100644 --- a/arch/arm/boot/dts/bcm21664.dtsi +++ b/arch/arm/boot/dts/bcm21664.dtsi | |||
@@ -55,7 +55,7 @@ | |||
55 | <0x3ff00100 0x100>; | 55 | <0x3ff00100 0x100>; |
56 | }; | 56 | }; |
57 | 57 | ||
58 | smc@0x3404e000 { | 58 | smc@3404e000 { |
59 | compatible = "brcm,bcm21664-smc", "brcm,kona-smc"; | 59 | compatible = "brcm,bcm21664-smc", "brcm,kona-smc"; |
60 | reg = <0x3404e000 0x400>; /* 1 KiB in SRAM */ | 60 | reg = <0x3404e000 0x400>; /* 1 KiB in SRAM */ |
61 | }; | 61 | }; |
diff --git a/arch/arm/boot/dts/bcm2835.dtsi b/arch/arm/boot/dts/bcm2835.dtsi index 0e3d2a5ff208..a5c3824c8056 100644 --- a/arch/arm/boot/dts/bcm2835.dtsi +++ b/arch/arm/boot/dts/bcm2835.dtsi | |||
@@ -18,10 +18,10 @@ | |||
18 | soc { | 18 | soc { |
19 | ranges = <0x7e000000 0x20000000 0x02000000>; | 19 | ranges = <0x7e000000 0x20000000 0x02000000>; |
20 | dma-ranges = <0x40000000 0x00000000 0x20000000>; | 20 | dma-ranges = <0x40000000 0x00000000 0x20000000>; |
21 | }; | ||
21 | 22 | ||
22 | arm-pmu { | 23 | arm-pmu { |
23 | compatible = "arm,arm1176-pmu"; | 24 | compatible = "arm,arm1176-pmu"; |
24 | }; | ||
25 | }; | 25 | }; |
26 | }; | 26 | }; |
27 | 27 | ||
diff --git a/arch/arm/boot/dts/bcm2836.dtsi b/arch/arm/boot/dts/bcm2836.dtsi index 1dfd76442777..c933e8413884 100644 --- a/arch/arm/boot/dts/bcm2836.dtsi +++ b/arch/arm/boot/dts/bcm2836.dtsi | |||
@@ -9,19 +9,19 @@ | |||
9 | <0x40000000 0x40000000 0x00001000>; | 9 | <0x40000000 0x40000000 0x00001000>; |
10 | dma-ranges = <0xc0000000 0x00000000 0x3f000000>; | 10 | dma-ranges = <0xc0000000 0x00000000 0x3f000000>; |
11 | 11 | ||
12 | local_intc: local_intc { | 12 | local_intc: local_intc@40000000 { |
13 | compatible = "brcm,bcm2836-l1-intc"; | 13 | compatible = "brcm,bcm2836-l1-intc"; |
14 | reg = <0x40000000 0x100>; | 14 | reg = <0x40000000 0x100>; |
15 | interrupt-controller; | 15 | interrupt-controller; |
16 | #interrupt-cells = <2>; | 16 | #interrupt-cells = <2>; |
17 | interrupt-parent = <&local_intc>; | 17 | interrupt-parent = <&local_intc>; |
18 | }; | 18 | }; |
19 | }; | ||
19 | 20 | ||
20 | arm-pmu { | 21 | arm-pmu { |
21 | compatible = "arm,cortex-a7-pmu"; | 22 | compatible = "arm,cortex-a7-pmu"; |
22 | interrupt-parent = <&local_intc>; | 23 | interrupt-parent = <&local_intc>; |
23 | interrupts = <9 IRQ_TYPE_LEVEL_HIGH>; | 24 | interrupts = <9 IRQ_TYPE_LEVEL_HIGH>; |
24 | }; | ||
25 | }; | 25 | }; |
26 | 26 | ||
27 | timer { | 27 | timer { |
diff --git a/arch/arm/boot/dts/bcm2837.dtsi b/arch/arm/boot/dts/bcm2837.dtsi index efa7d3387ab2..7704bb029605 100644 --- a/arch/arm/boot/dts/bcm2837.dtsi +++ b/arch/arm/boot/dts/bcm2837.dtsi | |||
@@ -8,7 +8,7 @@ | |||
8 | <0x40000000 0x40000000 0x00001000>; | 8 | <0x40000000 0x40000000 0x00001000>; |
9 | dma-ranges = <0xc0000000 0x00000000 0x3f000000>; | 9 | dma-ranges = <0xc0000000 0x00000000 0x3f000000>; |
10 | 10 | ||
11 | local_intc: local_intc { | 11 | local_intc: local_intc@40000000 { |
12 | compatible = "brcm,bcm2836-l1-intc"; | 12 | compatible = "brcm,bcm2836-l1-intc"; |
13 | reg = <0x40000000 0x100>; | 13 | reg = <0x40000000 0x100>; |
14 | interrupt-controller; | 14 | interrupt-controller; |
diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi index 18db25a5a66e..9d293decf8d3 100644 --- a/arch/arm/boot/dts/bcm283x.dtsi +++ b/arch/arm/boot/dts/bcm283x.dtsi | |||
@@ -465,7 +465,7 @@ | |||
465 | status = "disabled"; | 465 | status = "disabled"; |
466 | }; | 466 | }; |
467 | 467 | ||
468 | aux: aux@0x7e215000 { | 468 | aux: aux@7e215000 { |
469 | compatible = "brcm,bcm2835-aux"; | 469 | compatible = "brcm,bcm2835-aux"; |
470 | #clock-cells = <1>; | 470 | #clock-cells = <1>; |
471 | reg = <0x7e215000 0x8>; | 471 | reg = <0x7e215000 0x8>; |
diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts index 6a44b8021702..f0e2008f7490 100644 --- a/arch/arm/boot/dts/bcm958625hr.dts +++ b/arch/arm/boot/dts/bcm958625hr.dts | |||
@@ -49,7 +49,7 @@ | |||
49 | 49 | ||
50 | memory { | 50 | memory { |
51 | device_type = "memory"; | 51 | device_type = "memory"; |
52 | reg = <0x60000000 0x80000000>; | 52 | reg = <0x60000000 0x20000000>; |
53 | }; | 53 | }; |
54 | 54 | ||
55 | gpio-restart { | 55 | gpio-restart { |
diff --git a/arch/arm/boot/dts/gemini-dlink-dns-313.dts b/arch/arm/boot/dts/gemini-dlink-dns-313.dts index 08568ce24d06..da8bb9d60f99 100644 --- a/arch/arm/boot/dts/gemini-dlink-dns-313.dts +++ b/arch/arm/boot/dts/gemini-dlink-dns-313.dts | |||
@@ -269,7 +269,7 @@ | |||
269 | 269 | ||
270 | sata: sata@46000000 { | 270 | sata: sata@46000000 { |
271 | /* The ROM uses this muxmode */ | 271 | /* The ROM uses this muxmode */ |
272 | cortina,gemini-ata-muxmode = <3>; | 272 | cortina,gemini-ata-muxmode = <0>; |
273 | cortina,gemini-enable-sata-bridge; | 273 | cortina,gemini-enable-sata-bridge; |
274 | status = "okay"; | 274 | status = "okay"; |
275 | }; | 275 | }; |
diff --git a/arch/arm/boot/dts/imx6dl-icore-rqs.dts b/arch/arm/boot/dts/imx6dl-icore-rqs.dts index cf42c2f5cdc7..1281bc39b7ab 100644 --- a/arch/arm/boot/dts/imx6dl-icore-rqs.dts +++ b/arch/arm/boot/dts/imx6dl-icore-rqs.dts | |||
@@ -42,7 +42,7 @@ | |||
42 | 42 | ||
43 | /dts-v1/; | 43 | /dts-v1/; |
44 | 44 | ||
45 | #include "imx6q.dtsi" | 45 | #include "imx6dl.dtsi" |
46 | #include "imx6qdl-icore-rqs.dtsi" | 46 | #include "imx6qdl-icore-rqs.dtsi" |
47 | 47 | ||
48 | / { | 48 | / { |
diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi index c1aa7a4518fb..a30ee9fcb3ae 100644 --- a/arch/arm/boot/dts/logicpd-som-lv.dtsi +++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi | |||
@@ -71,6 +71,8 @@ | |||
71 | }; | 71 | }; |
72 | 72 | ||
73 | &i2c1 { | 73 | &i2c1 { |
74 | pinctrl-names = "default"; | ||
75 | pinctrl-0 = <&i2c1_pins>; | ||
74 | clock-frequency = <2600000>; | 76 | clock-frequency = <2600000>; |
75 | 77 | ||
76 | twl: twl@48 { | 78 | twl: twl@48 { |
@@ -189,7 +191,12 @@ | |||
189 | >; | 191 | >; |
190 | }; | 192 | }; |
191 | 193 | ||
192 | 194 | i2c1_pins: pinmux_i2c1_pins { | |
195 | pinctrl-single,pins = < | ||
196 | OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */ | ||
197 | OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */ | ||
198 | >; | ||
199 | }; | ||
193 | }; | 200 | }; |
194 | 201 | ||
195 | &omap3_pmx_wkup { | 202 | &omap3_pmx_wkup { |
diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi index b50b796e15c7..47915447a826 100644 --- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi +++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi | |||
@@ -66,6 +66,8 @@ | |||
66 | }; | 66 | }; |
67 | 67 | ||
68 | &i2c1 { | 68 | &i2c1 { |
69 | pinctrl-names = "default"; | ||
70 | pinctrl-0 = <&i2c1_pins>; | ||
69 | clock-frequency = <2600000>; | 71 | clock-frequency = <2600000>; |
70 | 72 | ||
71 | twl: twl@48 { | 73 | twl: twl@48 { |
@@ -136,6 +138,12 @@ | |||
136 | OMAP3_CORE1_IOPAD(0x21b8, PIN_INPUT | MUX_MODE0) /* hsusb0_data7.hsusb0_data7 */ | 138 | OMAP3_CORE1_IOPAD(0x21b8, PIN_INPUT | MUX_MODE0) /* hsusb0_data7.hsusb0_data7 */ |
137 | >; | 139 | >; |
138 | }; | 140 | }; |
141 | i2c1_pins: pinmux_i2c1_pins { | ||
142 | pinctrl-single,pins = < | ||
143 | OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */ | ||
144 | OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */ | ||
145 | >; | ||
146 | }; | ||
139 | }; | 147 | }; |
140 | 148 | ||
141 | &uart2 { | 149 | &uart2 { |
diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts index ec2c8baef62a..592e17fd4eeb 100644 --- a/arch/arm/boot/dts/omap5-uevm.dts +++ b/arch/arm/boot/dts/omap5-uevm.dts | |||
@@ -47,7 +47,7 @@ | |||
47 | gpios = <&gpio3 19 GPIO_ACTIVE_LOW>; /* gpio3_83 */ | 47 | gpios = <&gpio3 19 GPIO_ACTIVE_LOW>; /* gpio3_83 */ |
48 | wakeup-source; | 48 | wakeup-source; |
49 | autorepeat; | 49 | autorepeat; |
50 | debounce_interval = <50>; | 50 | debounce-interval = <50>; |
51 | }; | 51 | }; |
52 | }; | 52 | }; |
53 | 53 | ||
diff --git a/arch/arm/boot/dts/rk3036.dtsi b/arch/arm/boot/dts/rk3036.dtsi index 3b704cfed69a..a97458112ff6 100644 --- a/arch/arm/boot/dts/rk3036.dtsi +++ b/arch/arm/boot/dts/rk3036.dtsi | |||
@@ -280,7 +280,7 @@ | |||
280 | max-frequency = <37500000>; | 280 | max-frequency = <37500000>; |
281 | clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>, | 281 | clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>, |
282 | <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>; | 282 | <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>; |
283 | clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; | 283 | clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; |
284 | fifo-depth = <0x100>; | 284 | fifo-depth = <0x100>; |
285 | interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>; | 285 | interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>; |
286 | resets = <&cru SRST_SDIO>; | 286 | resets = <&cru SRST_SDIO>; |
@@ -298,7 +298,7 @@ | |||
298 | max-frequency = <37500000>; | 298 | max-frequency = <37500000>; |
299 | clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>, | 299 | clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>, |
300 | <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>; | 300 | <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>; |
301 | clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; | 301 | clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; |
302 | default-sample-phase = <158>; | 302 | default-sample-phase = <158>; |
303 | disable-wp; | 303 | disable-wp; |
304 | dmas = <&pdma 12>; | 304 | dmas = <&pdma 12>; |
diff --git a/arch/arm/boot/dts/rk322x.dtsi b/arch/arm/boot/dts/rk322x.dtsi index 780ec3a99b21..341deaf62ff6 100644 --- a/arch/arm/boot/dts/rk322x.dtsi +++ b/arch/arm/boot/dts/rk322x.dtsi | |||
@@ -621,7 +621,7 @@ | |||
621 | interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>; | 621 | interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>; |
622 | clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>, | 622 | clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>, |
623 | <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>; | 623 | <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>; |
624 | clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; | 624 | clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; |
625 | fifo-depth = <0x100>; | 625 | fifo-depth = <0x100>; |
626 | pinctrl-names = "default"; | 626 | pinctrl-names = "default"; |
627 | pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_bus4>; | 627 | pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_bus4>; |
@@ -634,7 +634,7 @@ | |||
634 | interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>; | 634 | interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>; |
635 | clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>, | 635 | clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>, |
636 | <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>; | 636 | <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>; |
637 | clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; | 637 | clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; |
638 | fifo-depth = <0x100>; | 638 | fifo-depth = <0x100>; |
639 | pinctrl-names = "default"; | 639 | pinctrl-names = "default"; |
640 | pinctrl-0 = <&sdio_clk &sdio_cmd &sdio_bus4>; | 640 | pinctrl-0 = <&sdio_clk &sdio_cmd &sdio_bus4>; |
@@ -649,7 +649,7 @@ | |||
649 | max-frequency = <37500000>; | 649 | max-frequency = <37500000>; |
650 | clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>, | 650 | clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>, |
651 | <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>; | 651 | <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>; |
652 | clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; | 652 | clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; |
653 | bus-width = <8>; | 653 | bus-width = <8>; |
654 | default-sample-phase = <158>; | 654 | default-sample-phase = <158>; |
655 | fifo-depth = <0x100>; | 655 | fifo-depth = <0x100>; |
diff --git a/arch/arm/boot/dts/rk3288-phycore-som.dtsi b/arch/arm/boot/dts/rk3288-phycore-som.dtsi index 99cfae875e12..5eae4776ffde 100644 --- a/arch/arm/boot/dts/rk3288-phycore-som.dtsi +++ b/arch/arm/boot/dts/rk3288-phycore-som.dtsi | |||
@@ -110,26 +110,6 @@ | |||
110 | }; | 110 | }; |
111 | }; | 111 | }; |
112 | 112 | ||
113 | &cpu0 { | ||
114 | cpu0-supply = <&vdd_cpu>; | ||
115 | operating-points = < | ||
116 | /* KHz uV */ | ||
117 | 1800000 1400000 | ||
118 | 1608000 1350000 | ||
119 | 1512000 1300000 | ||
120 | 1416000 1200000 | ||
121 | 1200000 1100000 | ||
122 | 1008000 1050000 | ||
123 | 816000 1000000 | ||
124 | 696000 950000 | ||
125 | 600000 900000 | ||
126 | 408000 900000 | ||
127 | 312000 900000 | ||
128 | 216000 900000 | ||
129 | 126000 900000 | ||
130 | >; | ||
131 | }; | ||
132 | |||
133 | &emmc { | 113 | &emmc { |
134 | status = "okay"; | 114 | status = "okay"; |
135 | bus-width = <8>; | 115 | bus-width = <8>; |
diff --git a/arch/arm/boot/dts/zx296702.dtsi b/arch/arm/boot/dts/zx296702.dtsi index 8a74efdb6360..240e7a23d81f 100644 --- a/arch/arm/boot/dts/zx296702.dtsi +++ b/arch/arm/boot/dts/zx296702.dtsi | |||
@@ -56,7 +56,7 @@ | |||
56 | clocks = <&topclk ZX296702_A9_PERIPHCLK>; | 56 | clocks = <&topclk ZX296702_A9_PERIPHCLK>; |
57 | }; | 57 | }; |
58 | 58 | ||
59 | l2cc: l2-cache-controller@0x00c00000 { | 59 | l2cc: l2-cache-controller@c00000 { |
60 | compatible = "arm,pl310-cache"; | 60 | compatible = "arm,pl310-cache"; |
61 | reg = <0x00c00000 0x1000>; | 61 | reg = <0x00c00000 0x1000>; |
62 | cache-unified; | 62 | cache-unified; |
@@ -67,30 +67,30 @@ | |||
67 | arm,double-linefill-incr = <0>; | 67 | arm,double-linefill-incr = <0>; |
68 | }; | 68 | }; |
69 | 69 | ||
70 | pcu: pcu@0xa0008000 { | 70 | pcu: pcu@a0008000 { |
71 | compatible = "zte,zx296702-pcu"; | 71 | compatible = "zte,zx296702-pcu"; |
72 | reg = <0xa0008000 0x1000>; | 72 | reg = <0xa0008000 0x1000>; |
73 | }; | 73 | }; |
74 | 74 | ||
75 | topclk: topclk@0x09800000 { | 75 | topclk: topclk@9800000 { |
76 | compatible = "zte,zx296702-topcrm-clk"; | 76 | compatible = "zte,zx296702-topcrm-clk"; |
77 | reg = <0x09800000 0x1000>; | 77 | reg = <0x09800000 0x1000>; |
78 | #clock-cells = <1>; | 78 | #clock-cells = <1>; |
79 | }; | 79 | }; |
80 | 80 | ||
81 | lsp1clk: lsp1clk@0x09400000 { | 81 | lsp1clk: lsp1clk@9400000 { |
82 | compatible = "zte,zx296702-lsp1crpm-clk"; | 82 | compatible = "zte,zx296702-lsp1crpm-clk"; |
83 | reg = <0x09400000 0x1000>; | 83 | reg = <0x09400000 0x1000>; |
84 | #clock-cells = <1>; | 84 | #clock-cells = <1>; |
85 | }; | 85 | }; |
86 | 86 | ||
87 | lsp0clk: lsp0clk@0x0b000000 { | 87 | lsp0clk: lsp0clk@b000000 { |
88 | compatible = "zte,zx296702-lsp0crpm-clk"; | 88 | compatible = "zte,zx296702-lsp0crpm-clk"; |
89 | reg = <0x0b000000 0x1000>; | 89 | reg = <0x0b000000 0x1000>; |
90 | #clock-cells = <1>; | 90 | #clock-cells = <1>; |
91 | }; | 91 | }; |
92 | 92 | ||
93 | uart0: serial@0x09405000 { | 93 | uart0: serial@9405000 { |
94 | compatible = "zte,zx296702-uart"; | 94 | compatible = "zte,zx296702-uart"; |
95 | reg = <0x09405000 0x1000>; | 95 | reg = <0x09405000 0x1000>; |
96 | interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>; | 96 | interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>; |
@@ -98,7 +98,7 @@ | |||
98 | status = "disabled"; | 98 | status = "disabled"; |
99 | }; | 99 | }; |
100 | 100 | ||
101 | uart1: serial@0x09406000 { | 101 | uart1: serial@9406000 { |
102 | compatible = "zte,zx296702-uart"; | 102 | compatible = "zte,zx296702-uart"; |
103 | reg = <0x09406000 0x1000>; | 103 | reg = <0x09406000 0x1000>; |
104 | interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>; | 104 | interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>; |
@@ -106,7 +106,7 @@ | |||
106 | status = "disabled"; | 106 | status = "disabled"; |
107 | }; | 107 | }; |
108 | 108 | ||
109 | mmc0: mmc@0x09408000 { | 109 | mmc0: mmc@9408000 { |
110 | compatible = "snps,dw-mshc"; | 110 | compatible = "snps,dw-mshc"; |
111 | #address-cells = <1>; | 111 | #address-cells = <1>; |
112 | #size-cells = <0>; | 112 | #size-cells = <0>; |
@@ -119,7 +119,7 @@ | |||
119 | status = "disabled"; | 119 | status = "disabled"; |
120 | }; | 120 | }; |
121 | 121 | ||
122 | mmc1: mmc@0x0b003000 { | 122 | mmc1: mmc@b003000 { |
123 | compatible = "snps,dw-mshc"; | 123 | compatible = "snps,dw-mshc"; |
124 | #address-cells = <1>; | 124 | #address-cells = <1>; |
125 | #size-cells = <0>; | 125 | #size-cells = <0>; |
@@ -132,7 +132,7 @@ | |||
132 | status = "disabled"; | 132 | status = "disabled"; |
133 | }; | 133 | }; |
134 | 134 | ||
135 | sysctrl: sysctrl@0xa0007000 { | 135 | sysctrl: sysctrl@a0007000 { |
136 | compatible = "zte,sysctrl", "syscon"; | 136 | compatible = "zte,sysctrl", "syscon"; |
137 | reg = <0xa0007000 0x1000>; | 137 | reg = <0xa0007000 0x1000>; |
138 | }; | 138 | }; |
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index 2f145c4af93a..92674f247a12 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig | |||
@@ -319,7 +319,7 @@ CONFIG_MEDIA_CAMERA_SUPPORT=y | |||
319 | CONFIG_RC_CORE=m | 319 | CONFIG_RC_CORE=m |
320 | CONFIG_MEDIA_CONTROLLER=y | 320 | CONFIG_MEDIA_CONTROLLER=y |
321 | CONFIG_VIDEO_V4L2_SUBDEV_API=y | 321 | CONFIG_VIDEO_V4L2_SUBDEV_API=y |
322 | CONFIG_LIRC=m | 322 | CONFIG_LIRC=y |
323 | CONFIG_RC_DEVICES=y | 323 | CONFIG_RC_DEVICES=y |
324 | CONFIG_IR_RX51=m | 324 | CONFIG_IR_RX51=m |
325 | CONFIG_V4L_PLATFORM_DRIVERS=y | 325 | CONFIG_V4L_PLATFORM_DRIVERS=y |
diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile index 5638ce0c9524..63d6b404d88e 100644 --- a/arch/arm/kvm/hyp/Makefile +++ b/arch/arm/kvm/hyp/Makefile | |||
@@ -7,6 +7,8 @@ ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING | |||
7 | 7 | ||
8 | KVM=../../../../virt/kvm | 8 | KVM=../../../../virt/kvm |
9 | 9 | ||
10 | CFLAGS_ARMV7VE :=$(call cc-option, -march=armv7ve) | ||
11 | |||
10 | obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o | 12 | obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o |
11 | obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o | 13 | obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o |
12 | obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o | 14 | obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o |
@@ -15,7 +17,10 @@ obj-$(CONFIG_KVM_ARM_HOST) += tlb.o | |||
15 | obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o | 17 | obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o |
16 | obj-$(CONFIG_KVM_ARM_HOST) += vfp.o | 18 | obj-$(CONFIG_KVM_ARM_HOST) += vfp.o |
17 | obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o | 19 | obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o |
20 | CFLAGS_banked-sr.o += $(CFLAGS_ARMV7VE) | ||
21 | |||
18 | obj-$(CONFIG_KVM_ARM_HOST) += entry.o | 22 | obj-$(CONFIG_KVM_ARM_HOST) += entry.o |
19 | obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o | 23 | obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o |
20 | obj-$(CONFIG_KVM_ARM_HOST) += switch.o | 24 | obj-$(CONFIG_KVM_ARM_HOST) += switch.o |
25 | CFLAGS_switch.o += $(CFLAGS_ARMV7VE) | ||
21 | obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o | 26 | obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o |
diff --git a/arch/arm/kvm/hyp/banked-sr.c b/arch/arm/kvm/hyp/banked-sr.c index 111bda8cdebd..be4b8b0a40ad 100644 --- a/arch/arm/kvm/hyp/banked-sr.c +++ b/arch/arm/kvm/hyp/banked-sr.c | |||
@@ -20,6 +20,10 @@ | |||
20 | 20 | ||
21 | #include <asm/kvm_hyp.h> | 21 | #include <asm/kvm_hyp.h> |
22 | 22 | ||
23 | /* | ||
24 | * gcc before 4.9 doesn't understand -march=armv7ve, so we have to | ||
25 | * trick the assembler. | ||
26 | */ | ||
23 | __asm__(".arch_extension virt"); | 27 | __asm__(".arch_extension virt"); |
24 | 28 | ||
25 | void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt) | 29 | void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt) |
diff --git a/arch/arm/mach-clps711x/board-dt.c b/arch/arm/mach-clps711x/board-dt.c index ee1f83b1a332..4c89a8e9a2e3 100644 --- a/arch/arm/mach-clps711x/board-dt.c +++ b/arch/arm/mach-clps711x/board-dt.c | |||
@@ -69,7 +69,7 @@ static void clps711x_restart(enum reboot_mode mode, const char *cmd) | |||
69 | soft_restart(0); | 69 | soft_restart(0); |
70 | } | 70 | } |
71 | 71 | ||
72 | static const char *clps711x_compat[] __initconst = { | 72 | static const char *const clps711x_compat[] __initconst = { |
73 | "cirrus,ep7209", | 73 | "cirrus,ep7209", |
74 | NULL | 74 | NULL |
75 | }; | 75 | }; |
diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c index e457f299cd44..d6b11907380c 100644 --- a/arch/arm/mach-davinci/board-dm355-evm.c +++ b/arch/arm/mach-davinci/board-dm355-evm.c | |||
@@ -368,7 +368,7 @@ static struct spi_eeprom at25640a = { | |||
368 | .flags = EE_ADDR2, | 368 | .flags = EE_ADDR2, |
369 | }; | 369 | }; |
370 | 370 | ||
371 | static struct spi_board_info dm355_evm_spi_info[] __initconst = { | 371 | static const struct spi_board_info dm355_evm_spi_info[] __initconst = { |
372 | { | 372 | { |
373 | .modalias = "at25", | 373 | .modalias = "at25", |
374 | .platform_data = &at25640a, | 374 | .platform_data = &at25640a, |
diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c index be997243447b..fad9a5611a5d 100644 --- a/arch/arm/mach-davinci/board-dm355-leopard.c +++ b/arch/arm/mach-davinci/board-dm355-leopard.c | |||
@@ -217,7 +217,7 @@ static struct spi_eeprom at25640a = { | |||
217 | .flags = EE_ADDR2, | 217 | .flags = EE_ADDR2, |
218 | }; | 218 | }; |
219 | 219 | ||
220 | static struct spi_board_info dm355_leopard_spi_info[] __initconst = { | 220 | static const struct spi_board_info dm355_leopard_spi_info[] __initconst = { |
221 | { | 221 | { |
222 | .modalias = "at25", | 222 | .modalias = "at25", |
223 | .platform_data = &at25640a, | 223 | .platform_data = &at25640a, |
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c index e75741fb2c1d..e3780986d2a3 100644 --- a/arch/arm/mach-davinci/board-dm365-evm.c +++ b/arch/arm/mach-davinci/board-dm365-evm.c | |||
@@ -726,7 +726,7 @@ static struct spi_eeprom at25640 = { | |||
726 | .flags = EE_ADDR2, | 726 | .flags = EE_ADDR2, |
727 | }; | 727 | }; |
728 | 728 | ||
729 | static struct spi_board_info dm365_evm_spi_info[] __initconst = { | 729 | static const struct spi_board_info dm365_evm_spi_info[] __initconst = { |
730 | { | 730 | { |
731 | .modalias = "at25", | 731 | .modalias = "at25", |
732 | .platform_data = &at25640, | 732 | .platform_data = &at25640, |
diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig index 6b32dc527edc..2c20599cc350 100644 --- a/arch/arm/mach-mvebu/Kconfig +++ b/arch/arm/mach-mvebu/Kconfig | |||
@@ -41,7 +41,7 @@ config MACH_ARMADA_375 | |||
41 | depends on ARCH_MULTI_V7 | 41 | depends on ARCH_MULTI_V7 |
42 | select ARMADA_370_XP_IRQ | 42 | select ARMADA_370_XP_IRQ |
43 | select ARM_ERRATA_720789 | 43 | select ARM_ERRATA_720789 |
44 | select ARM_ERRATA_753970 | 44 | select PL310_ERRATA_753970 |
45 | select ARM_GIC | 45 | select ARM_GIC |
46 | select ARMADA_375_CLK | 46 | select ARMADA_375_CLK |
47 | select HAVE_ARM_SCU | 47 | select HAVE_ARM_SCU |
@@ -57,7 +57,7 @@ config MACH_ARMADA_38X | |||
57 | bool "Marvell Armada 380/385 boards" | 57 | bool "Marvell Armada 380/385 boards" |
58 | depends on ARCH_MULTI_V7 | 58 | depends on ARCH_MULTI_V7 |
59 | select ARM_ERRATA_720789 | 59 | select ARM_ERRATA_720789 |
60 | select ARM_ERRATA_753970 | 60 | select PL310_ERRATA_753970 |
61 | select ARM_GIC | 61 | select ARM_GIC |
62 | select ARM_GLOBAL_TIMER | 62 | select ARM_GLOBAL_TIMER |
63 | select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK | 63 | select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK |
diff --git a/arch/arm/mach-omap1/clock.c b/arch/arm/mach-omap1/clock.c index 43e3e188f521..fa512413a471 100644 --- a/arch/arm/mach-omap1/clock.c +++ b/arch/arm/mach-omap1/clock.c | |||
@@ -1011,17 +1011,17 @@ static int clk_debugfs_register_one(struct clk *c) | |||
1011 | return -ENOMEM; | 1011 | return -ENOMEM; |
1012 | c->dent = d; | 1012 | c->dent = d; |
1013 | 1013 | ||
1014 | d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount); | 1014 | d = debugfs_create_u8("usecount", S_IRUGO, c->dent, &c->usecount); |
1015 | if (!d) { | 1015 | if (!d) { |
1016 | err = -ENOMEM; | 1016 | err = -ENOMEM; |
1017 | goto err_out; | 1017 | goto err_out; |
1018 | } | 1018 | } |
1019 | d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate); | 1019 | d = debugfs_create_ulong("rate", S_IRUGO, c->dent, &c->rate); |
1020 | if (!d) { | 1020 | if (!d) { |
1021 | err = -ENOMEM; | 1021 | err = -ENOMEM; |
1022 | goto err_out; | 1022 | goto err_out; |
1023 | } | 1023 | } |
1024 | d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags); | 1024 | d = debugfs_create_x8("flags", S_IRUGO, c->dent, &c->flags); |
1025 | if (!d) { | 1025 | if (!d) { |
1026 | err = -ENOMEM; | 1026 | err = -ENOMEM; |
1027 | goto err_out; | 1027 | goto err_out; |
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c index 4bb6751864a5..fc5fb776a710 100644 --- a/arch/arm/mach-omap2/omap-wakeupgen.c +++ b/arch/arm/mach-omap2/omap-wakeupgen.c | |||
@@ -299,8 +299,6 @@ static void irq_save_context(void) | |||
299 | if (soc_is_dra7xx()) | 299 | if (soc_is_dra7xx()) |
300 | return; | 300 | return; |
301 | 301 | ||
302 | if (!sar_base) | ||
303 | sar_base = omap4_get_sar_ram_base(); | ||
304 | if (wakeupgen_ops && wakeupgen_ops->save_context) | 302 | if (wakeupgen_ops && wakeupgen_ops->save_context) |
305 | wakeupgen_ops->save_context(); | 303 | wakeupgen_ops->save_context(); |
306 | } | 304 | } |
@@ -598,6 +596,8 @@ static int __init wakeupgen_init(struct device_node *node, | |||
598 | irq_hotplug_init(); | 596 | irq_hotplug_init(); |
599 | irq_pm_init(); | 597 | irq_pm_init(); |
600 | 598 | ||
599 | sar_base = omap4_get_sar_ram_base(); | ||
600 | |||
601 | return 0; | 601 | return 0; |
602 | } | 602 | } |
603 | IRQCHIP_DECLARE(ti_wakeupgen, "ti,omap4-wugen-mpu", wakeupgen_init); | 603 | IRQCHIP_DECLARE(ti_wakeupgen, "ti,omap4-wugen-mpu", wakeupgen_init); |
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 124f9af34a15..34156eca8e23 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c | |||
@@ -977,6 +977,9 @@ static int _enable_clocks(struct omap_hwmod *oh) | |||
977 | 977 | ||
978 | pr_debug("omap_hwmod: %s: enabling clocks\n", oh->name); | 978 | pr_debug("omap_hwmod: %s: enabling clocks\n", oh->name); |
979 | 979 | ||
980 | if (oh->flags & HWMOD_OPT_CLKS_NEEDED) | ||
981 | _enable_optional_clocks(oh); | ||
982 | |||
980 | if (oh->_clk) | 983 | if (oh->_clk) |
981 | clk_enable(oh->_clk); | 984 | clk_enable(oh->_clk); |
982 | 985 | ||
@@ -985,9 +988,6 @@ static int _enable_clocks(struct omap_hwmod *oh) | |||
985 | clk_enable(os->_clk); | 988 | clk_enable(os->_clk); |
986 | } | 989 | } |
987 | 990 | ||
988 | if (oh->flags & HWMOD_OPT_CLKS_NEEDED) | ||
989 | _enable_optional_clocks(oh); | ||
990 | |||
991 | /* The opt clocks are controlled by the device driver. */ | 991 | /* The opt clocks are controlled by the device driver. */ |
992 | 992 | ||
993 | return 0; | 993 | return 0; |
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c index 366158a54fcd..6f68576e5695 100644 --- a/arch/arm/mach-omap2/pm.c +++ b/arch/arm/mach-omap2/pm.c | |||
@@ -186,7 +186,7 @@ static void omap_pm_end(void) | |||
186 | cpu_idle_poll_ctrl(false); | 186 | cpu_idle_poll_ctrl(false); |
187 | } | 187 | } |
188 | 188 | ||
189 | static void omap_pm_finish(void) | 189 | static void omap_pm_wake(void) |
190 | { | 190 | { |
191 | if (soc_is_omap34xx()) | 191 | if (soc_is_omap34xx()) |
192 | omap_prcm_irq_complete(); | 192 | omap_prcm_irq_complete(); |
@@ -196,7 +196,7 @@ static const struct platform_suspend_ops omap_pm_ops = { | |||
196 | .begin = omap_pm_begin, | 196 | .begin = omap_pm_begin, |
197 | .end = omap_pm_end, | 197 | .end = omap_pm_end, |
198 | .enter = omap_pm_enter, | 198 | .enter = omap_pm_enter, |
199 | .finish = omap_pm_finish, | 199 | .wake = omap_pm_wake, |
200 | .valid = suspend_valid_only_mem, | 200 | .valid = suspend_valid_only_mem, |
201 | }; | 201 | }; |
202 | 202 | ||
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c index ece09c9461f7..d61fbd7a2840 100644 --- a/arch/arm/mach-omap2/timer.c +++ b/arch/arm/mach-omap2/timer.c | |||
@@ -156,12 +156,6 @@ static struct clock_event_device clockevent_gpt = { | |||
156 | .tick_resume = omap2_gp_timer_shutdown, | 156 | .tick_resume = omap2_gp_timer_shutdown, |
157 | }; | 157 | }; |
158 | 158 | ||
159 | static struct property device_disabled = { | ||
160 | .name = "status", | ||
161 | .length = sizeof("disabled"), | ||
162 | .value = "disabled", | ||
163 | }; | ||
164 | |||
165 | static const struct of_device_id omap_timer_match[] __initconst = { | 159 | static const struct of_device_id omap_timer_match[] __initconst = { |
166 | { .compatible = "ti,omap2420-timer", }, | 160 | { .compatible = "ti,omap2420-timer", }, |
167 | { .compatible = "ti,omap3430-timer", }, | 161 | { .compatible = "ti,omap3430-timer", }, |
@@ -203,8 +197,17 @@ static struct device_node * __init omap_get_timer_dt(const struct of_device_id * | |||
203 | of_get_property(np, "ti,timer-secure", NULL))) | 197 | of_get_property(np, "ti,timer-secure", NULL))) |
204 | continue; | 198 | continue; |
205 | 199 | ||
206 | if (!of_device_is_compatible(np, "ti,omap-counter32k")) | 200 | if (!of_device_is_compatible(np, "ti,omap-counter32k")) { |
207 | of_add_property(np, &device_disabled); | 201 | struct property *prop; |
202 | |||
203 | prop = kzalloc(sizeof(*prop), GFP_KERNEL); | ||
204 | if (!prop) | ||
205 | return NULL; | ||
206 | prop->name = "status"; | ||
207 | prop->value = "disabled"; | ||
208 | prop->length = strlen(prop->value); | ||
209 | of_add_property(np, prop); | ||
210 | } | ||
208 | return np; | 211 | return np; |
209 | } | 212 | } |
210 | 213 | ||
diff --git a/arch/arm/mach-orion5x/Kconfig b/arch/arm/mach-orion5x/Kconfig index 2a7bb6ccdcb7..a810f4dd34b1 100644 --- a/arch/arm/mach-orion5x/Kconfig +++ b/arch/arm/mach-orion5x/Kconfig | |||
@@ -58,7 +58,6 @@ config MACH_KUROBOX_PRO | |||
58 | 58 | ||
59 | config MACH_DNS323 | 59 | config MACH_DNS323 |
60 | bool "D-Link DNS-323" | 60 | bool "D-Link DNS-323" |
61 | select GENERIC_NET_UTILS | ||
62 | select I2C_BOARDINFO if I2C | 61 | select I2C_BOARDINFO if I2C |
63 | help | 62 | help |
64 | Say 'Y' here if you want your kernel to support the | 63 | Say 'Y' here if you want your kernel to support the |
@@ -66,7 +65,6 @@ config MACH_DNS323 | |||
66 | 65 | ||
67 | config MACH_TS209 | 66 | config MACH_TS209 |
68 | bool "QNAP TS-109/TS-209" | 67 | bool "QNAP TS-109/TS-209" |
69 | select GENERIC_NET_UTILS | ||
70 | help | 68 | help |
71 | Say 'Y' here if you want your kernel to support the | 69 | Say 'Y' here if you want your kernel to support the |
72 | QNAP TS-109/TS-209 platform. | 70 | QNAP TS-109/TS-209 platform. |
@@ -101,7 +99,6 @@ config MACH_LINKSTATION_LS_HGL | |||
101 | 99 | ||
102 | config MACH_TS409 | 100 | config MACH_TS409 |
103 | bool "QNAP TS-409" | 101 | bool "QNAP TS-409" |
104 | select GENERIC_NET_UTILS | ||
105 | help | 102 | help |
106 | Say 'Y' here if you want your kernel to support the | 103 | Say 'Y' here if you want your kernel to support the |
107 | QNAP TS-409 platform. | 104 | QNAP TS-409 platform. |
diff --git a/arch/arm/mach-orion5x/dns323-setup.c b/arch/arm/mach-orion5x/dns323-setup.c index cd483bfb5ca8..d13344b2ddcd 100644 --- a/arch/arm/mach-orion5x/dns323-setup.c +++ b/arch/arm/mach-orion5x/dns323-setup.c | |||
@@ -173,10 +173,42 @@ static struct mv643xx_eth_platform_data dns323_eth_data = { | |||
173 | .phy_addr = MV643XX_ETH_PHY_ADDR(8), | 173 | .phy_addr = MV643XX_ETH_PHY_ADDR(8), |
174 | }; | 174 | }; |
175 | 175 | ||
176 | /* dns323_parse_hex_*() taken from tsx09-common.c; should a common copy of these | ||
177 | * functions be kept somewhere? | ||
178 | */ | ||
179 | static int __init dns323_parse_hex_nibble(char n) | ||
180 | { | ||
181 | if (n >= '0' && n <= '9') | ||
182 | return n - '0'; | ||
183 | |||
184 | if (n >= 'A' && n <= 'F') | ||
185 | return n - 'A' + 10; | ||
186 | |||
187 | if (n >= 'a' && n <= 'f') | ||
188 | return n - 'a' + 10; | ||
189 | |||
190 | return -1; | ||
191 | } | ||
192 | |||
193 | static int __init dns323_parse_hex_byte(const char *b) | ||
194 | { | ||
195 | int hi; | ||
196 | int lo; | ||
197 | |||
198 | hi = dns323_parse_hex_nibble(b[0]); | ||
199 | lo = dns323_parse_hex_nibble(b[1]); | ||
200 | |||
201 | if (hi < 0 || lo < 0) | ||
202 | return -1; | ||
203 | |||
204 | return (hi << 4) | lo; | ||
205 | } | ||
206 | |||
176 | static int __init dns323_read_mac_addr(void) | 207 | static int __init dns323_read_mac_addr(void) |
177 | { | 208 | { |
178 | u_int8_t addr[6]; | 209 | u_int8_t addr[6]; |
179 | void __iomem *mac_page; | 210 | int i; |
211 | char *mac_page; | ||
180 | 212 | ||
181 | /* MAC address is stored as a regular ol' string in /dev/mtdblock4 | 213 | /* MAC address is stored as a regular ol' string in /dev/mtdblock4 |
182 | * (0x007d0000-0x00800000) starting at offset 196480 (0x2ff80). | 214 | * (0x007d0000-0x00800000) starting at offset 196480 (0x2ff80). |
@@ -185,8 +217,23 @@ static int __init dns323_read_mac_addr(void) | |||
185 | if (!mac_page) | 217 | if (!mac_page) |
186 | return -ENOMEM; | 218 | return -ENOMEM; |
187 | 219 | ||
188 | if (!mac_pton((__force const char *) mac_page, addr)) | 220 | /* Sanity check the string we're looking at */ |
189 | goto error_fail; | 221 | for (i = 0; i < 5; i++) { |
222 | if (*(mac_page + (i * 3) + 2) != ':') { | ||
223 | goto error_fail; | ||
224 | } | ||
225 | } | ||
226 | |||
227 | for (i = 0; i < 6; i++) { | ||
228 | int byte; | ||
229 | |||
230 | byte = dns323_parse_hex_byte(mac_page + (i * 3)); | ||
231 | if (byte < 0) { | ||
232 | goto error_fail; | ||
233 | } | ||
234 | |||
235 | addr[i] = byte; | ||
236 | } | ||
190 | 237 | ||
191 | iounmap(mac_page); | 238 | iounmap(mac_page); |
192 | printk("DNS-323: Found ethernet MAC address: %pM\n", addr); | 239 | printk("DNS-323: Found ethernet MAC address: %pM\n", addr); |
diff --git a/arch/arm/mach-orion5x/tsx09-common.c b/arch/arm/mach-orion5x/tsx09-common.c index 89774985d380..905d4f2dd0b8 100644 --- a/arch/arm/mach-orion5x/tsx09-common.c +++ b/arch/arm/mach-orion5x/tsx09-common.c | |||
@@ -53,12 +53,53 @@ struct mv643xx_eth_platform_data qnap_tsx09_eth_data = { | |||
53 | .phy_addr = MV643XX_ETH_PHY_ADDR(8), | 53 | .phy_addr = MV643XX_ETH_PHY_ADDR(8), |
54 | }; | 54 | }; |
55 | 55 | ||
56 | static int __init qnap_tsx09_parse_hex_nibble(char n) | ||
57 | { | ||
58 | if (n >= '0' && n <= '9') | ||
59 | return n - '0'; | ||
60 | |||
61 | if (n >= 'A' && n <= 'F') | ||
62 | return n - 'A' + 10; | ||
63 | |||
64 | if (n >= 'a' && n <= 'f') | ||
65 | return n - 'a' + 10; | ||
66 | |||
67 | return -1; | ||
68 | } | ||
69 | |||
70 | static int __init qnap_tsx09_parse_hex_byte(const char *b) | ||
71 | { | ||
72 | int hi; | ||
73 | int lo; | ||
74 | |||
75 | hi = qnap_tsx09_parse_hex_nibble(b[0]); | ||
76 | lo = qnap_tsx09_parse_hex_nibble(b[1]); | ||
77 | |||
78 | if (hi < 0 || lo < 0) | ||
79 | return -1; | ||
80 | |||
81 | return (hi << 4) | lo; | ||
82 | } | ||
83 | |||
56 | static int __init qnap_tsx09_check_mac_addr(const char *addr_str) | 84 | static int __init qnap_tsx09_check_mac_addr(const char *addr_str) |
57 | { | 85 | { |
58 | u_int8_t addr[6]; | 86 | u_int8_t addr[6]; |
87 | int i; | ||
59 | 88 | ||
60 | if (!mac_pton(addr_str, addr)) | 89 | for (i = 0; i < 6; i++) { |
61 | return -1; | 90 | int byte; |
91 | |||
92 | /* | ||
93 | * Enforce "xx:xx:xx:xx:xx:xx\n" format. | ||
94 | */ | ||
95 | if (addr_str[(i * 3) + 2] != ((i < 5) ? ':' : '\n')) | ||
96 | return -1; | ||
97 | |||
98 | byte = qnap_tsx09_parse_hex_byte(addr_str + (i * 3)); | ||
99 | if (byte < 0) | ||
100 | return -1; | ||
101 | addr[i] = byte; | ||
102 | } | ||
62 | 103 | ||
63 | printk(KERN_INFO "tsx09: found ethernet mac address %pM\n", addr); | 104 | printk(KERN_INFO "tsx09: found ethernet mac address %pM\n", addr); |
64 | 105 | ||
@@ -77,12 +118,12 @@ void __init qnap_tsx09_find_mac_addr(u32 mem_base, u32 size) | |||
77 | unsigned long addr; | 118 | unsigned long addr; |
78 | 119 | ||
79 | for (addr = mem_base; addr < (mem_base + size); addr += 1024) { | 120 | for (addr = mem_base; addr < (mem_base + size); addr += 1024) { |
80 | void __iomem *nor_page; | 121 | char *nor_page; |
81 | int ret = 0; | 122 | int ret = 0; |
82 | 123 | ||
83 | nor_page = ioremap(addr, 1024); | 124 | nor_page = ioremap(addr, 1024); |
84 | if (nor_page != NULL) { | 125 | if (nor_page != NULL) { |
85 | ret = qnap_tsx09_check_mac_addr((__force const char *)nor_page); | 126 | ret = qnap_tsx09_check_mac_addr(nor_page); |
86 | iounmap(nor_page); | 127 | iounmap(nor_page); |
87 | } | 128 | } |
88 | 129 | ||
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c index aff6994950ba..a2399fd66e97 100644 --- a/arch/arm/plat-orion/common.c +++ b/arch/arm/plat-orion/common.c | |||
@@ -472,28 +472,27 @@ void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data, | |||
472 | /***************************************************************************** | 472 | /***************************************************************************** |
473 | * Ethernet switch | 473 | * Ethernet switch |
474 | ****************************************************************************/ | 474 | ****************************************************************************/ |
475 | static __initconst const char *orion_ge00_mvmdio_bus_name = "orion-mii"; | 475 | static __initdata struct mdio_board_info orion_ge00_switch_board_info = { |
476 | static __initdata struct mdio_board_info | 476 | .bus_id = "orion-mii", |
477 | orion_ge00_switch_board_info; | 477 | .modalias = "mv88e6085", |
478 | }; | ||
478 | 479 | ||
479 | void __init orion_ge00_switch_init(struct dsa_chip_data *d) | 480 | void __init orion_ge00_switch_init(struct dsa_chip_data *d) |
480 | { | 481 | { |
481 | struct mdio_board_info *bd; | ||
482 | unsigned int i; | 482 | unsigned int i; |
483 | 483 | ||
484 | if (!IS_BUILTIN(CONFIG_PHYLIB)) | 484 | if (!IS_BUILTIN(CONFIG_PHYLIB)) |
485 | return; | 485 | return; |
486 | 486 | ||
487 | for (i = 0; i < ARRAY_SIZE(d->port_names); i++) | 487 | for (i = 0; i < ARRAY_SIZE(d->port_names); i++) { |
488 | if (!strcmp(d->port_names[i], "cpu")) | 488 | if (!strcmp(d->port_names[i], "cpu")) { |
489 | d->netdev[i] = &orion_ge00.dev; | ||
489 | break; | 490 | break; |
491 | } | ||
492 | } | ||
490 | 493 | ||
491 | bd = &orion_ge00_switch_board_info; | 494 | orion_ge00_switch_board_info.mdio_addr = d->sw_addr; |
492 | bd->bus_id = orion_ge00_mvmdio_bus_name; | 495 | orion_ge00_switch_board_info.platform_data = d; |
493 | bd->mdio_addr = d->sw_addr; | ||
494 | d->netdev[i] = &orion_ge00.dev; | ||
495 | strcpy(bd->modalias, "mv88e6085"); | ||
496 | bd->platform_data = d; | ||
497 | 496 | ||
498 | mdiobus_register_board_info(&orion_ge00_switch_board_info, 1); | 497 | mdiobus_register_board_info(&orion_ge00_switch_board_info, 1); |
499 | } | 498 | } |
diff --git a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi index a80632641b39..70c776ef7aa7 100644 --- a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi | |||
@@ -165,14 +165,14 @@ | |||
165 | 165 | ||
166 | uart_A: serial@24000 { | 166 | uart_A: serial@24000 { |
167 | compatible = "amlogic,meson-gx-uart", "amlogic,meson-uart"; | 167 | compatible = "amlogic,meson-gx-uart", "amlogic,meson-uart"; |
168 | reg = <0x0 0x24000 0x0 0x14>; | 168 | reg = <0x0 0x24000 0x0 0x18>; |
169 | interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>; | 169 | interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>; |
170 | status = "disabled"; | 170 | status = "disabled"; |
171 | }; | 171 | }; |
172 | 172 | ||
173 | uart_B: serial@23000 { | 173 | uart_B: serial@23000 { |
174 | compatible = "amlogic,meson-gx-uart", "amlogic,meson-uart"; | 174 | compatible = "amlogic,meson-gx-uart", "amlogic,meson-uart"; |
175 | reg = <0x0 0x23000 0x0 0x14>; | 175 | reg = <0x0 0x23000 0x0 0x18>; |
176 | interrupts = <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>; | 176 | interrupts = <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>; |
177 | status = "disabled"; | 177 | status = "disabled"; |
178 | }; | 178 | }; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi index 6cb3c2a52baf..4ee2e7951482 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi | |||
@@ -235,14 +235,14 @@ | |||
235 | 235 | ||
236 | uart_A: serial@84c0 { | 236 | uart_A: serial@84c0 { |
237 | compatible = "amlogic,meson-gx-uart"; | 237 | compatible = "amlogic,meson-gx-uart"; |
238 | reg = <0x0 0x84c0 0x0 0x14>; | 238 | reg = <0x0 0x84c0 0x0 0x18>; |
239 | interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>; | 239 | interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>; |
240 | status = "disabled"; | 240 | status = "disabled"; |
241 | }; | 241 | }; |
242 | 242 | ||
243 | uart_B: serial@84dc { | 243 | uart_B: serial@84dc { |
244 | compatible = "amlogic,meson-gx-uart"; | 244 | compatible = "amlogic,meson-gx-uart"; |
245 | reg = <0x0 0x84dc 0x0 0x14>; | 245 | reg = <0x0 0x84dc 0x0 0x18>; |
246 | interrupts = <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>; | 246 | interrupts = <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>; |
247 | status = "disabled"; | 247 | status = "disabled"; |
248 | }; | 248 | }; |
@@ -287,7 +287,7 @@ | |||
287 | 287 | ||
288 | uart_C: serial@8700 { | 288 | uart_C: serial@8700 { |
289 | compatible = "amlogic,meson-gx-uart"; | 289 | compatible = "amlogic,meson-gx-uart"; |
290 | reg = <0x0 0x8700 0x0 0x14>; | 290 | reg = <0x0 0x8700 0x0 0x18>; |
291 | interrupts = <GIC_SPI 93 IRQ_TYPE_EDGE_RISING>; | 291 | interrupts = <GIC_SPI 93 IRQ_TYPE_EDGE_RISING>; |
292 | status = "disabled"; | 292 | status = "disabled"; |
293 | }; | 293 | }; |
@@ -404,14 +404,14 @@ | |||
404 | 404 | ||
405 | uart_AO: serial@4c0 { | 405 | uart_AO: serial@4c0 { |
406 | compatible = "amlogic,meson-gx-uart", "amlogic,meson-ao-uart"; | 406 | compatible = "amlogic,meson-gx-uart", "amlogic,meson-ao-uart"; |
407 | reg = <0x0 0x004c0 0x0 0x14>; | 407 | reg = <0x0 0x004c0 0x0 0x18>; |
408 | interrupts = <GIC_SPI 193 IRQ_TYPE_EDGE_RISING>; | 408 | interrupts = <GIC_SPI 193 IRQ_TYPE_EDGE_RISING>; |
409 | status = "disabled"; | 409 | status = "disabled"; |
410 | }; | 410 | }; |
411 | 411 | ||
412 | uart_AO_B: serial@4e0 { | 412 | uart_AO_B: serial@4e0 { |
413 | compatible = "amlogic,meson-gx-uart", "amlogic,meson-ao-uart"; | 413 | compatible = "amlogic,meson-gx-uart", "amlogic,meson-ao-uart"; |
414 | reg = <0x0 0x004e0 0x0 0x14>; | 414 | reg = <0x0 0x004e0 0x0 0x18>; |
415 | interrupts = <GIC_SPI 197 IRQ_TYPE_EDGE_RISING>; | 415 | interrupts = <GIC_SPI 197 IRQ_TYPE_EDGE_RISING>; |
416 | status = "disabled"; | 416 | status = "disabled"; |
417 | }; | 417 | }; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi index 4f355f17eed6..c8514110b9da 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi | |||
@@ -631,6 +631,7 @@ | |||
631 | 631 | ||
632 | internal_phy: ethernet-phy@8 { | 632 | internal_phy: ethernet-phy@8 { |
633 | compatible = "ethernet-phy-id0181.4400", "ethernet-phy-ieee802.3-c22"; | 633 | compatible = "ethernet-phy-id0181.4400", "ethernet-phy-ieee802.3-c22"; |
634 | interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>; | ||
634 | reg = <8>; | 635 | reg = <8>; |
635 | max-speed = <100>; | 636 | max-speed = <100>; |
636 | }; | 637 | }; |
diff --git a/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi b/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi index 4220fbdcb24a..ff5c4c47b22b 100644 --- a/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi +++ b/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi | |||
@@ -98,7 +98,7 @@ | |||
98 | clock-output-names = "clk125mhz"; | 98 | clock-output-names = "clk125mhz"; |
99 | }; | 99 | }; |
100 | 100 | ||
101 | pci { | 101 | pcie@30000000 { |
102 | compatible = "pci-host-ecam-generic"; | 102 | compatible = "pci-host-ecam-generic"; |
103 | device_type = "pci"; | 103 | device_type = "pci"; |
104 | #interrupt-cells = <1>; | 104 | #interrupt-cells = <1>; |
@@ -118,6 +118,7 @@ | |||
118 | ranges = | 118 | ranges = |
119 | <0x02000000 0 0x40000000 0 0x40000000 0 0x20000000 | 119 | <0x02000000 0 0x40000000 0 0x40000000 0 0x20000000 |
120 | 0x43000000 0x40 0x00000000 0x40 0x00000000 0x20 0x00000000>; | 120 | 0x43000000 0x40 0x00000000 0x40 0x00000000 0x20 0x00000000>; |
121 | bus-range = <0 0xff>; | ||
121 | interrupt-map-mask = <0 0 0 7>; | 122 | interrupt-map-mask = <0 0 0 7>; |
122 | interrupt-map = | 123 | interrupt-map = |
123 | /* addr pin ic icaddr icintr */ | 124 | /* addr pin ic icaddr icintr */ |
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts index e94fa1a53192..047641fe294c 100644 --- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts +++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts | |||
@@ -51,7 +51,7 @@ | |||
51 | #size-cells = <2>; | 51 | #size-cells = <2>; |
52 | ranges; | 52 | ranges; |
53 | 53 | ||
54 | ramoops@0x21f00000 { | 54 | ramoops@21f00000 { |
55 | compatible = "ramoops"; | 55 | compatible = "ramoops"; |
56 | reg = <0x0 0x21f00000 0x0 0x00100000>; | 56 | reg = <0x0 0x21f00000 0x0 0x00100000>; |
57 | record-size = <0x00020000>; | 57 | record-size = <0x00020000>; |
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi index 9fbe4705ee88..94597e33c806 100644 --- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi | |||
@@ -341,7 +341,7 @@ | |||
341 | reg = <0 0x10005000 0 0x1000>; | 341 | reg = <0 0x10005000 0 0x1000>; |
342 | }; | 342 | }; |
343 | 343 | ||
344 | pio: pinctrl@0x10005000 { | 344 | pio: pinctrl@10005000 { |
345 | compatible = "mediatek,mt8173-pinctrl"; | 345 | compatible = "mediatek,mt8173-pinctrl"; |
346 | reg = <0 0x1000b000 0 0x1000>; | 346 | reg = <0 0x1000b000 0 0x1000>; |
347 | mediatek,pctl-regmap = <&syscfg_pctl_a>; | 347 | mediatek,pctl-regmap = <&syscfg_pctl_a>; |
diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi index 492a011f14f6..1c8f1b86472d 100644 --- a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi +++ b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi | |||
@@ -140,16 +140,16 @@ | |||
140 | }; | 140 | }; |
141 | 141 | ||
142 | agnoc@0 { | 142 | agnoc@0 { |
143 | qcom,pcie@00600000 { | 143 | qcom,pcie@600000 { |
144 | perst-gpio = <&msmgpio 35 GPIO_ACTIVE_LOW>; | 144 | perst-gpio = <&msmgpio 35 GPIO_ACTIVE_LOW>; |
145 | }; | 145 | }; |
146 | 146 | ||
147 | qcom,pcie@00608000 { | 147 | qcom,pcie@608000 { |
148 | status = "okay"; | 148 | status = "okay"; |
149 | perst-gpio = <&msmgpio 130 GPIO_ACTIVE_LOW>; | 149 | perst-gpio = <&msmgpio 130 GPIO_ACTIVE_LOW>; |
150 | }; | 150 | }; |
151 | 151 | ||
152 | qcom,pcie@00610000 { | 152 | qcom,pcie@610000 { |
153 | status = "okay"; | 153 | status = "okay"; |
154 | perst-gpio = <&msmgpio 114 GPIO_ACTIVE_LOW>; | 154 | perst-gpio = <&msmgpio 114 GPIO_ACTIVE_LOW>; |
155 | }; | 155 | }; |
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi index 4b2afcc4fdf4..0a6f7952bbb1 100644 --- a/arch/arm64/boot/dts/qcom/msm8996.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi | |||
@@ -840,7 +840,7 @@ | |||
840 | #size-cells = <1>; | 840 | #size-cells = <1>; |
841 | ranges; | 841 | ranges; |
842 | 842 | ||
843 | pcie0: qcom,pcie@00600000 { | 843 | pcie0: qcom,pcie@600000 { |
844 | compatible = "qcom,pcie-msm8996", "snps,dw-pcie"; | 844 | compatible = "qcom,pcie-msm8996", "snps,dw-pcie"; |
845 | status = "disabled"; | 845 | status = "disabled"; |
846 | power-domains = <&gcc PCIE0_GDSC>; | 846 | power-domains = <&gcc PCIE0_GDSC>; |
@@ -893,7 +893,7 @@ | |||
893 | 893 | ||
894 | }; | 894 | }; |
895 | 895 | ||
896 | pcie1: qcom,pcie@00608000 { | 896 | pcie1: qcom,pcie@608000 { |
897 | compatible = "qcom,pcie-msm8996", "snps,dw-pcie"; | 897 | compatible = "qcom,pcie-msm8996", "snps,dw-pcie"; |
898 | power-domains = <&gcc PCIE1_GDSC>; | 898 | power-domains = <&gcc PCIE1_GDSC>; |
899 | bus-range = <0x00 0xff>; | 899 | bus-range = <0x00 0xff>; |
@@ -946,7 +946,7 @@ | |||
946 | "bus_slave"; | 946 | "bus_slave"; |
947 | }; | 947 | }; |
948 | 948 | ||
949 | pcie2: qcom,pcie@00610000 { | 949 | pcie2: qcom,pcie@610000 { |
950 | compatible = "qcom,pcie-msm8996", "snps,dw-pcie"; | 950 | compatible = "qcom,pcie-msm8996", "snps,dw-pcie"; |
951 | power-domains = <&gcc PCIE2_GDSC>; | 951 | power-domains = <&gcc PCIE2_GDSC>; |
952 | bus-range = <0x00 0xff>; | 952 | bus-range = <0x00 0xff>; |
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts index 3890468678ce..28257724a56e 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts | |||
@@ -132,17 +132,16 @@ | |||
132 | assigned-clocks = <&cru SCLK_MAC2IO>, <&cru SCLK_MAC2IO_EXT>; | 132 | assigned-clocks = <&cru SCLK_MAC2IO>, <&cru SCLK_MAC2IO_EXT>; |
133 | assigned-clock-parents = <&gmac_clkin>, <&gmac_clkin>; | 133 | assigned-clock-parents = <&gmac_clkin>, <&gmac_clkin>; |
134 | clock_in_out = "input"; | 134 | clock_in_out = "input"; |
135 | /* shows instability at 1GBit right now */ | ||
136 | max-speed = <100>; | ||
137 | phy-supply = <&vcc_io>; | 135 | phy-supply = <&vcc_io>; |
138 | phy-mode = "rgmii"; | 136 | phy-mode = "rgmii"; |
139 | pinctrl-names = "default"; | 137 | pinctrl-names = "default"; |
140 | pinctrl-0 = <&rgmiim1_pins>; | 138 | pinctrl-0 = <&rgmiim1_pins>; |
139 | snps,force_thresh_dma_mode; | ||
141 | snps,reset-gpio = <&gpio1 RK_PC2 GPIO_ACTIVE_LOW>; | 140 | snps,reset-gpio = <&gpio1 RK_PC2 GPIO_ACTIVE_LOW>; |
142 | snps,reset-active-low; | 141 | snps,reset-active-low; |
143 | snps,reset-delays-us = <0 10000 50000>; | 142 | snps,reset-delays-us = <0 10000 50000>; |
144 | tx_delay = <0x26>; | 143 | tx_delay = <0x24>; |
145 | rx_delay = <0x11>; | 144 | rx_delay = <0x18>; |
146 | status = "okay"; | 145 | status = "okay"; |
147 | }; | 146 | }; |
148 | 147 | ||
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi index a037ee56fead..cae341554486 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi | |||
@@ -730,7 +730,7 @@ | |||
730 | interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>; | 730 | interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>; |
731 | clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>, | 731 | clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>, |
732 | <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>; | 732 | <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>; |
733 | clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; | 733 | clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; |
734 | fifo-depth = <0x100>; | 734 | fifo-depth = <0x100>; |
735 | status = "disabled"; | 735 | status = "disabled"; |
736 | }; | 736 | }; |
@@ -741,7 +741,7 @@ | |||
741 | interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>; | 741 | interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>; |
742 | clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>, | 742 | clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>, |
743 | <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>; | 743 | <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>; |
744 | clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; | 744 | clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; |
745 | fifo-depth = <0x100>; | 745 | fifo-depth = <0x100>; |
746 | status = "disabled"; | 746 | status = "disabled"; |
747 | }; | 747 | }; |
@@ -752,7 +752,7 @@ | |||
752 | interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>; | 752 | interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>; |
753 | clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>, | 753 | clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>, |
754 | <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>; | 754 | <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>; |
755 | clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; | 755 | clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; |
756 | fifo-depth = <0x100>; | 756 | fifo-depth = <0x100>; |
757 | status = "disabled"; | 757 | status = "disabled"; |
758 | }; | 758 | }; |
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi index aa4d07046a7b..03458ac44201 100644 --- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi | |||
@@ -257,7 +257,7 @@ | |||
257 | max-frequency = <150000000>; | 257 | max-frequency = <150000000>; |
258 | clocks = <&cru HCLK_SDIO0>, <&cru SCLK_SDIO0>, | 258 | clocks = <&cru HCLK_SDIO0>, <&cru SCLK_SDIO0>, |
259 | <&cru SCLK_SDIO0_DRV>, <&cru SCLK_SDIO0_SAMPLE>; | 259 | <&cru SCLK_SDIO0_DRV>, <&cru SCLK_SDIO0_SAMPLE>; |
260 | clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; | 260 | clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; |
261 | fifo-depth = <0x100>; | 261 | fifo-depth = <0x100>; |
262 | interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>; | 262 | interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>; |
263 | resets = <&cru SRST_SDIO0>; | 263 | resets = <&cru SRST_SDIO0>; |
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi index 0f873c897d0d..ce592a4c0c4c 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi | |||
@@ -457,7 +457,7 @@ | |||
457 | assigned-clocks = <&cru SCLK_PCIEPHY_REF>; | 457 | assigned-clocks = <&cru SCLK_PCIEPHY_REF>; |
458 | assigned-clock-parents = <&cru SCLK_PCIEPHY_REF100M>; | 458 | assigned-clock-parents = <&cru SCLK_PCIEPHY_REF100M>; |
459 | assigned-clock-rates = <100000000>; | 459 | assigned-clock-rates = <100000000>; |
460 | ep-gpios = <&gpio3 RK_PB5 GPIO_ACTIVE_HIGH>; | 460 | ep-gpios = <&gpio2 RK_PA4 GPIO_ACTIVE_HIGH>; |
461 | num-lanes = <4>; | 461 | num-lanes = <4>; |
462 | pinctrl-names = "default"; | 462 | pinctrl-names = "default"; |
463 | pinctrl-0 = <&pcie_clkreqn_cpm>; | 463 | pinctrl-0 = <&pcie_clkreqn_cpm>; |
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi index 7aa2144e0d47..2605118d4b4c 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi | |||
@@ -1739,8 +1739,8 @@ | |||
1739 | compatible = "rockchip,rk3399-edp"; | 1739 | compatible = "rockchip,rk3399-edp"; |
1740 | reg = <0x0 0xff970000 0x0 0x8000>; | 1740 | reg = <0x0 0xff970000 0x0 0x8000>; |
1741 | interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH 0>; | 1741 | interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH 0>; |
1742 | clocks = <&cru PCLK_EDP>, <&cru PCLK_EDP_CTRL>; | 1742 | clocks = <&cru PCLK_EDP>, <&cru PCLK_EDP_CTRL>, <&cru PCLK_VIO_GRF>; |
1743 | clock-names = "dp", "pclk"; | 1743 | clock-names = "dp", "pclk", "grf"; |
1744 | pinctrl-names = "default"; | 1744 | pinctrl-names = "default"; |
1745 | pinctrl-0 = <&edp_hpd>; | 1745 | pinctrl-0 = <&edp_hpd>; |
1746 | power-domains = <&power RK3399_PD_EDP>; | 1746 | power-domains = <&power RK3399_PD_EDP>; |
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 52f15cd896e1..b5a28336c077 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c | |||
@@ -178,7 +178,7 @@ static int enable_smccc_arch_workaround_1(void *data) | |||
178 | case PSCI_CONDUIT_HVC: | 178 | case PSCI_CONDUIT_HVC: |
179 | arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, | 179 | arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, |
180 | ARM_SMCCC_ARCH_WORKAROUND_1, &res); | 180 | ARM_SMCCC_ARCH_WORKAROUND_1, &res); |
181 | if (res.a0) | 181 | if ((int)res.a0 < 0) |
182 | return 0; | 182 | return 0; |
183 | cb = call_hvc_arch_workaround_1; | 183 | cb = call_hvc_arch_workaround_1; |
184 | smccc_start = __smccc_workaround_1_hvc_start; | 184 | smccc_start = __smccc_workaround_1_hvc_start; |
@@ -188,7 +188,7 @@ static int enable_smccc_arch_workaround_1(void *data) | |||
188 | case PSCI_CONDUIT_SMC: | 188 | case PSCI_CONDUIT_SMC: |
189 | arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, | 189 | arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, |
190 | ARM_SMCCC_ARCH_WORKAROUND_1, &res); | 190 | ARM_SMCCC_ARCH_WORKAROUND_1, &res); |
191 | if (res.a0) | 191 | if ((int)res.a0 < 0) |
192 | return 0; | 192 | return 0; |
193 | cb = call_smc_arch_workaround_1; | 193 | cb = call_smc_arch_workaround_1; |
194 | smccc_start = __smccc_workaround_1_smc_start; | 194 | smccc_start = __smccc_workaround_1_smc_start; |
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index d7e3299a7734..959e50d2588c 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c | |||
@@ -363,8 +363,6 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, | |||
363 | { | 363 | { |
364 | int ret = 0; | 364 | int ret = 0; |
365 | 365 | ||
366 | vcpu_load(vcpu); | ||
367 | |||
368 | trace_kvm_set_guest_debug(vcpu, dbg->control); | 366 | trace_kvm_set_guest_debug(vcpu, dbg->control); |
369 | 367 | ||
370 | if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) { | 368 | if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) { |
@@ -386,7 +384,6 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, | |||
386 | } | 384 | } |
387 | 385 | ||
388 | out: | 386 | out: |
389 | vcpu_put(vcpu); | ||
390 | return ret; | 387 | return ret; |
391 | } | 388 | } |
392 | 389 | ||
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 84a019f55022..8c704f1e53c2 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c | |||
@@ -108,7 +108,7 @@ static bool pgattr_change_is_safe(u64 old, u64 new) | |||
108 | * The following mapping attributes may be updated in live | 108 | * The following mapping attributes may be updated in live |
109 | * kernel mappings without the need for break-before-make. | 109 | * kernel mappings without the need for break-before-make. |
110 | */ | 110 | */ |
111 | static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE; | 111 | static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG; |
112 | 112 | ||
113 | /* creating or taking down mappings is always safe */ | 113 | /* creating or taking down mappings is always safe */ |
114 | if (old == 0 || new == 0) | 114 | if (old == 0 || new == 0) |
@@ -118,9 +118,9 @@ static bool pgattr_change_is_safe(u64 old, u64 new) | |||
118 | if ((old | new) & PTE_CONT) | 118 | if ((old | new) & PTE_CONT) |
119 | return false; | 119 | return false; |
120 | 120 | ||
121 | /* Transitioning from Global to Non-Global is safe */ | 121 | /* Transitioning from Non-Global to Global is unsafe */ |
122 | if (((old ^ new) == PTE_NG) && (new & PTE_NG)) | 122 | if (old & ~new & PTE_NG) |
123 | return true; | 123 | return false; |
124 | 124 | ||
125 | return ((old ^ new) & ~mask) == 0; | 125 | return ((old ^ new) & ~mask) == 0; |
126 | } | 126 | } |
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h index 762eeb0fcc1d..2524fb60fbc2 100644 --- a/arch/ia64/include/asm/atomic.h +++ b/arch/ia64/include/asm/atomic.h | |||
@@ -66,38 +66,35 @@ ATOMIC_OPS(add, +) | |||
66 | ATOMIC_OPS(sub, -) | 66 | ATOMIC_OPS(sub, -) |
67 | 67 | ||
68 | #ifdef __OPTIMIZE__ | 68 | #ifdef __OPTIMIZE__ |
69 | #define __ia64_atomic_const(i) __builtin_constant_p(i) ? \ | 69 | #define __ia64_atomic_const(i) \ |
70 | static const int __ia64_atomic_p = __builtin_constant_p(i) ? \ | ||
70 | ((i) == 1 || (i) == 4 || (i) == 8 || (i) == 16 || \ | 71 | ((i) == 1 || (i) == 4 || (i) == 8 || (i) == 16 || \ |
71 | (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16) : 0 | 72 | (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16) : 0;\ |
73 | __ia64_atomic_p | ||
74 | #else | ||
75 | #define __ia64_atomic_const(i) 0 | ||
76 | #endif | ||
72 | 77 | ||
73 | #define atomic_add_return(i, v) \ | 78 | #define atomic_add_return(i,v) \ |
74 | ({ \ | 79 | ({ \ |
75 | int __i = (i); \ | 80 | int __ia64_aar_i = (i); \ |
76 | static const int __ia64_atomic_p = __ia64_atomic_const(i); \ | 81 | __ia64_atomic_const(i) \ |
77 | __ia64_atomic_p ? ia64_fetch_and_add(__i, &(v)->counter) : \ | 82 | ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \ |
78 | ia64_atomic_add(__i, v); \ | 83 | : ia64_atomic_add(__ia64_aar_i, v); \ |
79 | }) | 84 | }) |
80 | 85 | ||
81 | #define atomic_sub_return(i, v) \ | 86 | #define atomic_sub_return(i,v) \ |
82 | ({ \ | 87 | ({ \ |
83 | int __i = (i); \ | 88 | int __ia64_asr_i = (i); \ |
84 | static const int __ia64_atomic_p = __ia64_atomic_const(i); \ | 89 | __ia64_atomic_const(i) \ |
85 | __ia64_atomic_p ? ia64_fetch_and_add(-__i, &(v)->counter) : \ | 90 | ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \ |
86 | ia64_atomic_sub(__i, v); \ | 91 | : ia64_atomic_sub(__ia64_asr_i, v); \ |
87 | }) | 92 | }) |
88 | #else | ||
89 | #define atomic_add_return(i, v) ia64_atomic_add(i, v) | ||
90 | #define atomic_sub_return(i, v) ia64_atomic_sub(i, v) | ||
91 | #endif | ||
92 | 93 | ||
93 | #define atomic_fetch_add(i,v) \ | 94 | #define atomic_fetch_add(i,v) \ |
94 | ({ \ | 95 | ({ \ |
95 | int __ia64_aar_i = (i); \ | 96 | int __ia64_aar_i = (i); \ |
96 | (__builtin_constant_p(i) \ | 97 | __ia64_atomic_const(i) \ |
97 | && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \ | ||
98 | || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \ | ||
99 | || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \ | ||
100 | || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \ | ||
101 | ? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq) \ | 98 | ? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq) \ |
102 | : ia64_atomic_fetch_add(__ia64_aar_i, v); \ | 99 | : ia64_atomic_fetch_add(__ia64_aar_i, v); \ |
103 | }) | 100 | }) |
@@ -105,11 +102,7 @@ ATOMIC_OPS(sub, -) | |||
105 | #define atomic_fetch_sub(i,v) \ | 102 | #define atomic_fetch_sub(i,v) \ |
106 | ({ \ | 103 | ({ \ |
107 | int __ia64_asr_i = (i); \ | 104 | int __ia64_asr_i = (i); \ |
108 | (__builtin_constant_p(i) \ | 105 | __ia64_atomic_const(i) \ |
109 | && ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \ | ||
110 | || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \ | ||
111 | || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \ | ||
112 | || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \ | ||
113 | ? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq) \ | 106 | ? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq) \ |
114 | : ia64_atomic_fetch_sub(__ia64_asr_i, v); \ | 107 | : ia64_atomic_fetch_sub(__ia64_asr_i, v); \ |
115 | }) | 108 | }) |
@@ -170,11 +163,7 @@ ATOMIC64_OPS(sub, -) | |||
170 | #define atomic64_add_return(i,v) \ | 163 | #define atomic64_add_return(i,v) \ |
171 | ({ \ | 164 | ({ \ |
172 | long __ia64_aar_i = (i); \ | 165 | long __ia64_aar_i = (i); \ |
173 | (__builtin_constant_p(i) \ | 166 | __ia64_atomic_const(i) \ |
174 | && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \ | ||
175 | || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \ | ||
176 | || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \ | ||
177 | || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \ | ||
178 | ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \ | 167 | ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \ |
179 | : ia64_atomic64_add(__ia64_aar_i, v); \ | 168 | : ia64_atomic64_add(__ia64_aar_i, v); \ |
180 | }) | 169 | }) |
@@ -182,11 +171,7 @@ ATOMIC64_OPS(sub, -) | |||
182 | #define atomic64_sub_return(i,v) \ | 171 | #define atomic64_sub_return(i,v) \ |
183 | ({ \ | 172 | ({ \ |
184 | long __ia64_asr_i = (i); \ | 173 | long __ia64_asr_i = (i); \ |
185 | (__builtin_constant_p(i) \ | 174 | __ia64_atomic_const(i) \ |
186 | && ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \ | ||
187 | || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \ | ||
188 | || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \ | ||
189 | || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \ | ||
190 | ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \ | 175 | ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \ |
191 | : ia64_atomic64_sub(__ia64_asr_i, v); \ | 176 | : ia64_atomic64_sub(__ia64_asr_i, v); \ |
192 | }) | 177 | }) |
@@ -194,11 +179,7 @@ ATOMIC64_OPS(sub, -) | |||
194 | #define atomic64_fetch_add(i,v) \ | 179 | #define atomic64_fetch_add(i,v) \ |
195 | ({ \ | 180 | ({ \ |
196 | long __ia64_aar_i = (i); \ | 181 | long __ia64_aar_i = (i); \ |
197 | (__builtin_constant_p(i) \ | 182 | __ia64_atomic_const(i) \ |
198 | && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \ | ||
199 | || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \ | ||
200 | || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \ | ||
201 | || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \ | ||
202 | ? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq) \ | 183 | ? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq) \ |
203 | : ia64_atomic64_fetch_add(__ia64_aar_i, v); \ | 184 | : ia64_atomic64_fetch_add(__ia64_aar_i, v); \ |
204 | }) | 185 | }) |
@@ -206,11 +187,7 @@ ATOMIC64_OPS(sub, -) | |||
206 | #define atomic64_fetch_sub(i,v) \ | 187 | #define atomic64_fetch_sub(i,v) \ |
207 | ({ \ | 188 | ({ \ |
208 | long __ia64_asr_i = (i); \ | 189 | long __ia64_asr_i = (i); \ |
209 | (__builtin_constant_p(i) \ | 190 | __ia64_atomic_const(i) \ |
210 | && ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \ | ||
211 | || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \ | ||
212 | || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \ | ||
213 | || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \ | ||
214 | ? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq) \ | 191 | ? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq) \ |
215 | : ia64_atomic64_fetch_sub(__ia64_asr_i, v); \ | 192 | : ia64_atomic64_fetch_sub(__ia64_asr_i, v); \ |
216 | }) | 193 | }) |
diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c index 85bba43e7d5d..8b5b8e6bc9d9 100644 --- a/arch/ia64/kernel/err_inject.c +++ b/arch/ia64/kernel/err_inject.c | |||
@@ -117,7 +117,7 @@ store_call_start(struct device *dev, struct device_attribute *attr, | |||
117 | 117 | ||
118 | #ifdef ERR_INJ_DEBUG | 118 | #ifdef ERR_INJ_DEBUG |
119 | printk(KERN_DEBUG "Returns: status=%d,\n", (int)status[cpu]); | 119 | printk(KERN_DEBUG "Returns: status=%d,\n", (int)status[cpu]); |
120 | printk(KERN_DEBUG "capapbilities=%lx,\n", capabilities[cpu]); | 120 | printk(KERN_DEBUG "capabilities=%lx,\n", capabilities[cpu]); |
121 | printk(KERN_DEBUG "resources=%lx\n", resources[cpu]); | 121 | printk(KERN_DEBUG "resources=%lx\n", resources[cpu]); |
122 | #endif | 122 | #endif |
123 | return size; | 123 | return size; |
@@ -142,7 +142,7 @@ store_virtual_to_phys(struct device *dev, struct device_attribute *attr, | |||
142 | u64 virt_addr=simple_strtoull(buf, NULL, 16); | 142 | u64 virt_addr=simple_strtoull(buf, NULL, 16); |
143 | int ret; | 143 | int ret; |
144 | 144 | ||
145 | ret = get_user_pages(virt_addr, 1, FOLL_WRITE, NULL, NULL); | 145 | ret = get_user_pages_fast(virt_addr, 1, FOLL_WRITE, NULL); |
146 | if (ret<=0) { | 146 | if (ret<=0) { |
147 | #ifdef ERR_INJ_DEBUG | 147 | #ifdef ERR_INJ_DEBUG |
148 | printk("Virtual address %lx is not existing.\n",virt_addr); | 148 | printk("Virtual address %lx is not existing.\n",virt_addr); |
diff --git a/arch/ia64/scripts/unwcheck.py b/arch/ia64/scripts/unwcheck.py index 89f3a1480a63..c55276e31b6b 100644 --- a/arch/ia64/scripts/unwcheck.py +++ b/arch/ia64/scripts/unwcheck.py | |||
@@ -16,7 +16,7 @@ import re | |||
16 | import sys | 16 | import sys |
17 | 17 | ||
18 | if len(sys.argv) != 2: | 18 | if len(sys.argv) != 2: |
19 | print "Usage: %s FILE" % sys.argv[0] | 19 | print("Usage: %s FILE" % sys.argv[0]) |
20 | sys.exit(2) | 20 | sys.exit(2) |
21 | 21 | ||
22 | readelf = os.getenv("READELF", "readelf") | 22 | readelf = os.getenv("READELF", "readelf") |
@@ -29,7 +29,7 @@ def check_func (func, slots, rlen_sum): | |||
29 | global num_errors | 29 | global num_errors |
30 | num_errors += 1 | 30 | num_errors += 1 |
31 | if not func: func = "[%#x-%#x]" % (start, end) | 31 | if not func: func = "[%#x-%#x]" % (start, end) |
32 | print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum) | 32 | print("ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)) |
33 | return | 33 | return |
34 | 34 | ||
35 | num_funcs = 0 | 35 | num_funcs = 0 |
@@ -43,23 +43,23 @@ for line in os.popen("%s -u %s" % (readelf, sys.argv[1])): | |||
43 | check_func(func, slots, rlen_sum) | 43 | check_func(func, slots, rlen_sum) |
44 | 44 | ||
45 | func = m.group(1) | 45 | func = m.group(1) |
46 | start = long(m.group(2), 16) | 46 | start = int(m.group(2), 16) |
47 | end = long(m.group(3), 16) | 47 | end = int(m.group(3), 16) |
48 | slots = 3 * (end - start) / 16 | 48 | slots = 3 * (end - start) / 16 |
49 | rlen_sum = 0L | 49 | rlen_sum = 0 |
50 | num_funcs += 1 | 50 | num_funcs += 1 |
51 | else: | 51 | else: |
52 | m = rlen_pattern.match(line) | 52 | m = rlen_pattern.match(line) |
53 | if m: | 53 | if m: |
54 | rlen_sum += long(m.group(1)) | 54 | rlen_sum += int(m.group(1)) |
55 | check_func(func, slots, rlen_sum) | 55 | check_func(func, slots, rlen_sum) |
56 | 56 | ||
57 | if num_errors == 0: | 57 | if num_errors == 0: |
58 | print "No errors detected in %u functions." % num_funcs | 58 | print("No errors detected in %u functions." % num_funcs) |
59 | else: | 59 | else: |
60 | if num_errors > 1: | 60 | if num_errors > 1: |
61 | err="errors" | 61 | err="errors" |
62 | else: | 62 | else: |
63 | err="error" | 63 | err="error" |
64 | print "%u %s detected in %u functions." % (num_errors, err, num_funcs) | 64 | print("%u %s detected in %u functions." % (num_errors, err, num_funcs)) |
65 | sys.exit(1) | 65 | sys.exit(1) |
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 4f798aa671dd..3817a3e2146c 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig | |||
@@ -24,6 +24,7 @@ config MICROBLAZE | |||
24 | select HAVE_FTRACE_MCOUNT_RECORD | 24 | select HAVE_FTRACE_MCOUNT_RECORD |
25 | select HAVE_FUNCTION_GRAPH_TRACER | 25 | select HAVE_FUNCTION_GRAPH_TRACER |
26 | select HAVE_FUNCTION_TRACER | 26 | select HAVE_FUNCTION_TRACER |
27 | select NO_BOOTMEM | ||
27 | select HAVE_MEMBLOCK | 28 | select HAVE_MEMBLOCK |
28 | select HAVE_MEMBLOCK_NODE_MAP | 29 | select HAVE_MEMBLOCK_NODE_MAP |
29 | select HAVE_OPROFILE | 30 | select HAVE_OPROFILE |
diff --git a/arch/microblaze/Kconfig.platform b/arch/microblaze/Kconfig.platform index 6996f397c16c..f7f1739c11b9 100644 --- a/arch/microblaze/Kconfig.platform +++ b/arch/microblaze/Kconfig.platform | |||
@@ -8,7 +8,6 @@ menu "Platform options" | |||
8 | 8 | ||
9 | config OPT_LIB_FUNCTION | 9 | config OPT_LIB_FUNCTION |
10 | bool "Optimalized lib function" | 10 | bool "Optimalized lib function" |
11 | depends on CPU_LITTLE_ENDIAN | ||
12 | default y | 11 | default y |
13 | help | 12 | help |
14 | Allows turn on optimalized library function (memcpy and memmove). | 13 | Allows turn on optimalized library function (memcpy and memmove). |
@@ -21,6 +20,7 @@ config OPT_LIB_FUNCTION | |||
21 | config OPT_LIB_ASM | 20 | config OPT_LIB_ASM |
22 | bool "Optimalized lib function ASM" | 21 | bool "Optimalized lib function ASM" |
23 | depends on OPT_LIB_FUNCTION && (XILINX_MICROBLAZE0_USE_BARREL = 1) | 22 | depends on OPT_LIB_FUNCTION && (XILINX_MICROBLAZE0_USE_BARREL = 1) |
23 | depends on CPU_BIG_ENDIAN | ||
24 | default n | 24 | default n |
25 | help | 25 | help |
26 | Allows turn on optimalized library function (memcpy and memmove). | 26 | Allows turn on optimalized library function (memcpy and memmove). |
diff --git a/arch/microblaze/include/asm/setup.h b/arch/microblaze/include/asm/setup.h index be84a4d3917f..7c968c1d1729 100644 --- a/arch/microblaze/include/asm/setup.h +++ b/arch/microblaze/include/asm/setup.h | |||
@@ -44,7 +44,6 @@ void machine_shutdown(void); | |||
44 | void machine_halt(void); | 44 | void machine_halt(void); |
45 | void machine_power_off(void); | 45 | void machine_power_off(void); |
46 | 46 | ||
47 | extern void *alloc_maybe_bootmem(size_t size, gfp_t mask); | ||
48 | extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask); | 47 | extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask); |
49 | 48 | ||
50 | # endif /* __ASSEMBLY__ */ | 49 | # endif /* __ASSEMBLY__ */ |
diff --git a/arch/microblaze/lib/fastcopy.S b/arch/microblaze/lib/fastcopy.S index 62021d7e249e..fdc48bb065d8 100644 --- a/arch/microblaze/lib/fastcopy.S +++ b/arch/microblaze/lib/fastcopy.S | |||
@@ -29,10 +29,6 @@ | |||
29 | * between mem locations with size of xfer spec'd in bytes | 29 | * between mem locations with size of xfer spec'd in bytes |
30 | */ | 30 | */ |
31 | 31 | ||
32 | #ifdef __MICROBLAZEEL__ | ||
33 | #error Microblaze LE not support ASM optimized lib func. Disable OPT_LIB_ASM. | ||
34 | #endif | ||
35 | |||
36 | #include <linux/linkage.h> | 32 | #include <linux/linkage.h> |
37 | .text | 33 | .text |
38 | .globl memcpy | 34 | .globl memcpy |
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index 434639f9a3a6..df6de7ccdc2e 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c | |||
@@ -32,9 +32,6 @@ int mem_init_done; | |||
32 | #ifndef CONFIG_MMU | 32 | #ifndef CONFIG_MMU |
33 | unsigned int __page_offset; | 33 | unsigned int __page_offset; |
34 | EXPORT_SYMBOL(__page_offset); | 34 | EXPORT_SYMBOL(__page_offset); |
35 | |||
36 | #else | ||
37 | static int init_bootmem_done; | ||
38 | #endif /* CONFIG_MMU */ | 35 | #endif /* CONFIG_MMU */ |
39 | 36 | ||
40 | char *klimit = _end; | 37 | char *klimit = _end; |
@@ -117,7 +114,6 @@ static void __init paging_init(void) | |||
117 | 114 | ||
118 | void __init setup_memory(void) | 115 | void __init setup_memory(void) |
119 | { | 116 | { |
120 | unsigned long map_size; | ||
121 | struct memblock_region *reg; | 117 | struct memblock_region *reg; |
122 | 118 | ||
123 | #ifndef CONFIG_MMU | 119 | #ifndef CONFIG_MMU |
@@ -174,17 +170,6 @@ void __init setup_memory(void) | |||
174 | pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn); | 170 | pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn); |
175 | pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn); | 171 | pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn); |
176 | 172 | ||
177 | /* | ||
178 | * Find an area to use for the bootmem bitmap. | ||
179 | * We look for the first area which is at least | ||
180 | * 128kB in length (128kB is enough for a bitmap | ||
181 | * for 4GB of memory, using 4kB pages), plus 1 page | ||
182 | * (in case the address isn't page-aligned). | ||
183 | */ | ||
184 | map_size = init_bootmem_node(NODE_DATA(0), | ||
185 | PFN_UP(TOPHYS((u32)klimit)), min_low_pfn, max_low_pfn); | ||
186 | memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size); | ||
187 | |||
188 | /* Add active regions with valid PFNs */ | 173 | /* Add active regions with valid PFNs */ |
189 | for_each_memblock(memory, reg) { | 174 | for_each_memblock(memory, reg) { |
190 | unsigned long start_pfn, end_pfn; | 175 | unsigned long start_pfn, end_pfn; |
@@ -196,32 +181,9 @@ void __init setup_memory(void) | |||
196 | &memblock.memory, 0); | 181 | &memblock.memory, 0); |
197 | } | 182 | } |
198 | 183 | ||
199 | /* free bootmem is whole main memory */ | ||
200 | free_bootmem_with_active_regions(0, max_low_pfn); | ||
201 | |||
202 | /* reserve allocate blocks */ | ||
203 | for_each_memblock(reserved, reg) { | ||
204 | unsigned long top = reg->base + reg->size - 1; | ||
205 | |||
206 | pr_debug("reserved - 0x%08x-0x%08x, %lx, %lx\n", | ||
207 | (u32) reg->base, (u32) reg->size, top, | ||
208 | memory_start + lowmem_size - 1); | ||
209 | |||
210 | if (top <= (memory_start + lowmem_size - 1)) { | ||
211 | reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT); | ||
212 | } else if (reg->base < (memory_start + lowmem_size - 1)) { | ||
213 | unsigned long trunc_size = memory_start + lowmem_size - | ||
214 | reg->base; | ||
215 | reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT); | ||
216 | } | ||
217 | } | ||
218 | |||
219 | /* XXX need to clip this if using highmem? */ | 184 | /* XXX need to clip this if using highmem? */ |
220 | sparse_memory_present_with_active_regions(0); | 185 | sparse_memory_present_with_active_regions(0); |
221 | 186 | ||
222 | #ifdef CONFIG_MMU | ||
223 | init_bootmem_done = 1; | ||
224 | #endif | ||
225 | paging_init(); | 187 | paging_init(); |
226 | } | 188 | } |
227 | 189 | ||
@@ -398,30 +360,16 @@ asmlinkage void __init mmu_init(void) | |||
398 | /* This is only called until mem_init is done. */ | 360 | /* This is only called until mem_init is done. */ |
399 | void __init *early_get_page(void) | 361 | void __init *early_get_page(void) |
400 | { | 362 | { |
401 | void *p; | 363 | /* |
402 | if (init_bootmem_done) { | 364 | * Mem start + kernel_tlb -> here is limit |
403 | p = alloc_bootmem_pages(PAGE_SIZE); | 365 | * because of mem mapping from head.S |
404 | } else { | 366 | */ |
405 | /* | 367 | return __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, |
406 | * Mem start + kernel_tlb -> here is limit | 368 | memory_start + kernel_tlb)); |
407 | * because of mem mapping from head.S | ||
408 | */ | ||
409 | p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, | ||
410 | memory_start + kernel_tlb)); | ||
411 | } | ||
412 | return p; | ||
413 | } | 369 | } |
414 | 370 | ||
415 | #endif /* CONFIG_MMU */ | 371 | #endif /* CONFIG_MMU */ |
416 | 372 | ||
417 | void * __ref alloc_maybe_bootmem(size_t size, gfp_t mask) | ||
418 | { | ||
419 | if (mem_init_done) | ||
420 | return kmalloc(size, mask); | ||
421 | else | ||
422 | return alloc_bootmem(size); | ||
423 | } | ||
424 | |||
425 | void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask) | 373 | void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask) |
426 | { | 374 | { |
427 | void *p; | 375 | void *p; |
diff --git a/arch/mips/ath25/board.c b/arch/mips/ath25/board.c index 9ab48ff80c1c..6d11ae581ea7 100644 --- a/arch/mips/ath25/board.c +++ b/arch/mips/ath25/board.c | |||
@@ -135,6 +135,8 @@ int __init ath25_find_config(phys_addr_t base, unsigned long size) | |||
135 | } | 135 | } |
136 | 136 | ||
137 | board_data = kzalloc(BOARD_CONFIG_BUFSZ, GFP_KERNEL); | 137 | board_data = kzalloc(BOARD_CONFIG_BUFSZ, GFP_KERNEL); |
138 | if (!board_data) | ||
139 | goto error; | ||
138 | ath25_board.config = (struct ath25_boarddata *)board_data; | 140 | ath25_board.config = (struct ath25_boarddata *)board_data; |
139 | memcpy_fromio(board_data, bcfg, 0x100); | 141 | memcpy_fromio(board_data, bcfg, 0x100); |
140 | if (broken_boarddata) { | 142 | if (broken_boarddata) { |
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c index 5b3a3f6a9ad3..d99f5242169e 100644 --- a/arch/mips/cavium-octeon/octeon-irq.c +++ b/arch/mips/cavium-octeon/octeon-irq.c | |||
@@ -2277,6 +2277,8 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node, | |||
2277 | } | 2277 | } |
2278 | 2278 | ||
2279 | host_data = kzalloc(sizeof(*host_data), GFP_KERNEL); | 2279 | host_data = kzalloc(sizeof(*host_data), GFP_KERNEL); |
2280 | if (!host_data) | ||
2281 | return -ENOMEM; | ||
2280 | raw_spin_lock_init(&host_data->lock); | 2282 | raw_spin_lock_init(&host_data->lock); |
2281 | 2283 | ||
2282 | addr = of_get_address(ciu_node, 0, NULL, NULL); | 2284 | addr = of_get_address(ciu_node, 0, NULL, NULL); |
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index 9d41732a9146..159e83add4bb 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c | |||
@@ -168,11 +168,11 @@ static void bmips_prepare_cpus(unsigned int max_cpus) | |||
168 | return; | 168 | return; |
169 | } | 169 | } |
170 | 170 | ||
171 | if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, IRQF_PERCPU, | 171 | if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, |
172 | "smp_ipi0", NULL)) | 172 | IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi0", NULL)) |
173 | panic("Can't request IPI0 interrupt"); | 173 | panic("Can't request IPI0 interrupt"); |
174 | if (request_irq(IPI1_IRQ, bmips_ipi_interrupt, IRQF_PERCPU, | 174 | if (request_irq(IPI1_IRQ, bmips_ipi_interrupt, |
175 | "smp_ipi1", NULL)) | 175 | IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi1", NULL)) |
176 | panic("Can't request IPI1 interrupt"); | 176 | panic("Can't request IPI1 interrupt"); |
177 | } | 177 | } |
178 | 178 | ||
diff --git a/arch/mips/loongson64/Kconfig b/arch/mips/loongson64/Kconfig index bc2fdbfa8223..72af0c183969 100644 --- a/arch/mips/loongson64/Kconfig +++ b/arch/mips/loongson64/Kconfig | |||
@@ -7,6 +7,8 @@ choice | |||
7 | config LEMOTE_FULOONG2E | 7 | config LEMOTE_FULOONG2E |
8 | bool "Lemote Fuloong(2e) mini-PC" | 8 | bool "Lemote Fuloong(2e) mini-PC" |
9 | select ARCH_SPARSEMEM_ENABLE | 9 | select ARCH_SPARSEMEM_ENABLE |
10 | select ARCH_MIGHT_HAVE_PC_PARPORT | ||
11 | select ARCH_MIGHT_HAVE_PC_SERIO | ||
10 | select CEVT_R4K | 12 | select CEVT_R4K |
11 | select CSRC_R4K | 13 | select CSRC_R4K |
12 | select SYS_HAS_CPU_LOONGSON2E | 14 | select SYS_HAS_CPU_LOONGSON2E |
@@ -33,6 +35,8 @@ config LEMOTE_FULOONG2E | |||
33 | config LEMOTE_MACH2F | 35 | config LEMOTE_MACH2F |
34 | bool "Lemote Loongson 2F family machines" | 36 | bool "Lemote Loongson 2F family machines" |
35 | select ARCH_SPARSEMEM_ENABLE | 37 | select ARCH_SPARSEMEM_ENABLE |
38 | select ARCH_MIGHT_HAVE_PC_PARPORT | ||
39 | select ARCH_MIGHT_HAVE_PC_SERIO | ||
36 | select BOARD_SCACHE | 40 | select BOARD_SCACHE |
37 | select BOOT_ELF32 | 41 | select BOOT_ELF32 |
38 | select CEVT_R4K if ! MIPS_EXTERNAL_TIMER | 42 | select CEVT_R4K if ! MIPS_EXTERNAL_TIMER |
@@ -62,6 +66,8 @@ config LEMOTE_MACH2F | |||
62 | config LOONGSON_MACH3X | 66 | config LOONGSON_MACH3X |
63 | bool "Generic Loongson 3 family machines" | 67 | bool "Generic Loongson 3 family machines" |
64 | select ARCH_SPARSEMEM_ENABLE | 68 | select ARCH_SPARSEMEM_ENABLE |
69 | select ARCH_MIGHT_HAVE_PC_PARPORT | ||
70 | select ARCH_MIGHT_HAVE_PC_SERIO | ||
65 | select GENERIC_ISA_DMA_SUPPORT_BROKEN | 71 | select GENERIC_ISA_DMA_SUPPORT_BROKEN |
66 | select BOOT_ELF32 | 72 | select BOOT_ELF32 |
67 | select BOARD_SCACHE | 73 | select BOARD_SCACHE |
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h index 3742508cc534..bd5ce31936f5 100644 --- a/arch/parisc/include/asm/cacheflush.h +++ b/arch/parisc/include/asm/cacheflush.h | |||
@@ -26,6 +26,7 @@ void flush_user_icache_range_asm(unsigned long, unsigned long); | |||
26 | void flush_kernel_icache_range_asm(unsigned long, unsigned long); | 26 | void flush_kernel_icache_range_asm(unsigned long, unsigned long); |
27 | void flush_user_dcache_range_asm(unsigned long, unsigned long); | 27 | void flush_user_dcache_range_asm(unsigned long, unsigned long); |
28 | void flush_kernel_dcache_range_asm(unsigned long, unsigned long); | 28 | void flush_kernel_dcache_range_asm(unsigned long, unsigned long); |
29 | void purge_kernel_dcache_range_asm(unsigned long, unsigned long); | ||
29 | void flush_kernel_dcache_page_asm(void *); | 30 | void flush_kernel_dcache_page_asm(void *); |
30 | void flush_kernel_icache_page(void *); | 31 | void flush_kernel_icache_page(void *); |
31 | 32 | ||
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h index 0e6ab6e4a4e9..2dbe5580a1a4 100644 --- a/arch/parisc/include/asm/processor.h +++ b/arch/parisc/include/asm/processor.h | |||
@@ -316,6 +316,8 @@ extern int _parisc_requires_coherency; | |||
316 | #define parisc_requires_coherency() (0) | 316 | #define parisc_requires_coherency() (0) |
317 | #endif | 317 | #endif |
318 | 318 | ||
319 | extern int running_on_qemu; | ||
320 | |||
319 | #endif /* __ASSEMBLY__ */ | 321 | #endif /* __ASSEMBLY__ */ |
320 | 322 | ||
321 | #endif /* __ASM_PARISC_PROCESSOR_H */ | 323 | #endif /* __ASM_PARISC_PROCESSOR_H */ |
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index 19c0c141bc3f..e3b45546d589 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c | |||
@@ -465,10 +465,10 @@ EXPORT_SYMBOL(copy_user_page); | |||
465 | int __flush_tlb_range(unsigned long sid, unsigned long start, | 465 | int __flush_tlb_range(unsigned long sid, unsigned long start, |
466 | unsigned long end) | 466 | unsigned long end) |
467 | { | 467 | { |
468 | unsigned long flags, size; | 468 | unsigned long flags; |
469 | 469 | ||
470 | size = (end - start); | 470 | if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && |
471 | if (size >= parisc_tlb_flush_threshold) { | 471 | end - start >= parisc_tlb_flush_threshold) { |
472 | flush_tlb_all(); | 472 | flush_tlb_all(); |
473 | return 1; | 473 | return 1; |
474 | } | 474 | } |
@@ -539,13 +539,12 @@ void flush_cache_mm(struct mm_struct *mm) | |||
539 | struct vm_area_struct *vma; | 539 | struct vm_area_struct *vma; |
540 | pgd_t *pgd; | 540 | pgd_t *pgd; |
541 | 541 | ||
542 | /* Flush the TLB to avoid speculation if coherency is required. */ | ||
543 | if (parisc_requires_coherency()) | ||
544 | flush_tlb_all(); | ||
545 | |||
546 | /* Flushing the whole cache on each cpu takes forever on | 542 | /* Flushing the whole cache on each cpu takes forever on |
547 | rp3440, etc. So, avoid it if the mm isn't too big. */ | 543 | rp3440, etc. So, avoid it if the mm isn't too big. */ |
548 | if (mm_total_size(mm) >= parisc_cache_flush_threshold) { | 544 | if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && |
545 | mm_total_size(mm) >= parisc_cache_flush_threshold) { | ||
546 | if (mm->context) | ||
547 | flush_tlb_all(); | ||
549 | flush_cache_all(); | 548 | flush_cache_all(); |
550 | return; | 549 | return; |
551 | } | 550 | } |
@@ -553,9 +552,9 @@ void flush_cache_mm(struct mm_struct *mm) | |||
553 | if (mm->context == mfsp(3)) { | 552 | if (mm->context == mfsp(3)) { |
554 | for (vma = mm->mmap; vma; vma = vma->vm_next) { | 553 | for (vma = mm->mmap; vma; vma = vma->vm_next) { |
555 | flush_user_dcache_range_asm(vma->vm_start, vma->vm_end); | 554 | flush_user_dcache_range_asm(vma->vm_start, vma->vm_end); |
556 | if ((vma->vm_flags & VM_EXEC) == 0) | 555 | if (vma->vm_flags & VM_EXEC) |
557 | continue; | 556 | flush_user_icache_range_asm(vma->vm_start, vma->vm_end); |
558 | flush_user_icache_range_asm(vma->vm_start, vma->vm_end); | 557 | flush_tlb_range(vma, vma->vm_start, vma->vm_end); |
559 | } | 558 | } |
560 | return; | 559 | return; |
561 | } | 560 | } |
@@ -573,6 +572,8 @@ void flush_cache_mm(struct mm_struct *mm) | |||
573 | pfn = pte_pfn(*ptep); | 572 | pfn = pte_pfn(*ptep); |
574 | if (!pfn_valid(pfn)) | 573 | if (!pfn_valid(pfn)) |
575 | continue; | 574 | continue; |
575 | if (unlikely(mm->context)) | ||
576 | flush_tlb_page(vma, addr); | ||
576 | __flush_cache_page(vma, addr, PFN_PHYS(pfn)); | 577 | __flush_cache_page(vma, addr, PFN_PHYS(pfn)); |
577 | } | 578 | } |
578 | } | 579 | } |
@@ -581,30 +582,45 @@ void flush_cache_mm(struct mm_struct *mm) | |||
581 | void flush_cache_range(struct vm_area_struct *vma, | 582 | void flush_cache_range(struct vm_area_struct *vma, |
582 | unsigned long start, unsigned long end) | 583 | unsigned long start, unsigned long end) |
583 | { | 584 | { |
584 | BUG_ON(!vma->vm_mm->context); | 585 | pgd_t *pgd; |
585 | 586 | unsigned long addr; | |
586 | /* Flush the TLB to avoid speculation if coherency is required. */ | ||
587 | if (parisc_requires_coherency()) | ||
588 | flush_tlb_range(vma, start, end); | ||
589 | 587 | ||
590 | if ((end - start) >= parisc_cache_flush_threshold | 588 | if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && |
591 | || vma->vm_mm->context != mfsp(3)) { | 589 | end - start >= parisc_cache_flush_threshold) { |
590 | if (vma->vm_mm->context) | ||
591 | flush_tlb_range(vma, start, end); | ||
592 | flush_cache_all(); | 592 | flush_cache_all(); |
593 | return; | 593 | return; |
594 | } | 594 | } |
595 | 595 | ||
596 | flush_user_dcache_range_asm(start, end); | 596 | if (vma->vm_mm->context == mfsp(3)) { |
597 | if (vma->vm_flags & VM_EXEC) | 597 | flush_user_dcache_range_asm(start, end); |
598 | flush_user_icache_range_asm(start, end); | 598 | if (vma->vm_flags & VM_EXEC) |
599 | flush_user_icache_range_asm(start, end); | ||
600 | flush_tlb_range(vma, start, end); | ||
601 | return; | ||
602 | } | ||
603 | |||
604 | pgd = vma->vm_mm->pgd; | ||
605 | for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) { | ||
606 | unsigned long pfn; | ||
607 | pte_t *ptep = get_ptep(pgd, addr); | ||
608 | if (!ptep) | ||
609 | continue; | ||
610 | pfn = pte_pfn(*ptep); | ||
611 | if (pfn_valid(pfn)) { | ||
612 | if (unlikely(vma->vm_mm->context)) | ||
613 | flush_tlb_page(vma, addr); | ||
614 | __flush_cache_page(vma, addr, PFN_PHYS(pfn)); | ||
615 | } | ||
616 | } | ||
599 | } | 617 | } |
600 | 618 | ||
601 | void | 619 | void |
602 | flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn) | 620 | flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn) |
603 | { | 621 | { |
604 | BUG_ON(!vma->vm_mm->context); | ||
605 | |||
606 | if (pfn_valid(pfn)) { | 622 | if (pfn_valid(pfn)) { |
607 | if (parisc_requires_coherency()) | 623 | if (likely(vma->vm_mm->context)) |
608 | flush_tlb_page(vma, vmaddr); | 624 | flush_tlb_page(vma, vmaddr); |
609 | __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); | 625 | __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); |
610 | } | 626 | } |
@@ -613,21 +629,33 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long | |||
613 | void flush_kernel_vmap_range(void *vaddr, int size) | 629 | void flush_kernel_vmap_range(void *vaddr, int size) |
614 | { | 630 | { |
615 | unsigned long start = (unsigned long)vaddr; | 631 | unsigned long start = (unsigned long)vaddr; |
632 | unsigned long end = start + size; | ||
616 | 633 | ||
617 | if ((unsigned long)size > parisc_cache_flush_threshold) | 634 | if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && |
635 | (unsigned long)size >= parisc_cache_flush_threshold) { | ||
636 | flush_tlb_kernel_range(start, end); | ||
618 | flush_data_cache(); | 637 | flush_data_cache(); |
619 | else | 638 | return; |
620 | flush_kernel_dcache_range_asm(start, start + size); | 639 | } |
640 | |||
641 | flush_kernel_dcache_range_asm(start, end); | ||
642 | flush_tlb_kernel_range(start, end); | ||
621 | } | 643 | } |
622 | EXPORT_SYMBOL(flush_kernel_vmap_range); | 644 | EXPORT_SYMBOL(flush_kernel_vmap_range); |
623 | 645 | ||
624 | void invalidate_kernel_vmap_range(void *vaddr, int size) | 646 | void invalidate_kernel_vmap_range(void *vaddr, int size) |
625 | { | 647 | { |
626 | unsigned long start = (unsigned long)vaddr; | 648 | unsigned long start = (unsigned long)vaddr; |
649 | unsigned long end = start + size; | ||
627 | 650 | ||
628 | if ((unsigned long)size > parisc_cache_flush_threshold) | 651 | if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && |
652 | (unsigned long)size >= parisc_cache_flush_threshold) { | ||
653 | flush_tlb_kernel_range(start, end); | ||
629 | flush_data_cache(); | 654 | flush_data_cache(); |
630 | else | 655 | return; |
631 | flush_kernel_dcache_range_asm(start, start + size); | 656 | } |
657 | |||
658 | purge_kernel_dcache_range_asm(start, end); | ||
659 | flush_tlb_kernel_range(start, end); | ||
632 | } | 660 | } |
633 | EXPORT_SYMBOL(invalidate_kernel_vmap_range); | 661 | EXPORT_SYMBOL(invalidate_kernel_vmap_range); |
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S index bbbe360b458f..fbb4e43fda05 100644 --- a/arch/parisc/kernel/head.S +++ b/arch/parisc/kernel/head.S | |||
@@ -138,6 +138,16 @@ $pgt_fill_loop: | |||
138 | std %dp,0x18(%r10) | 138 | std %dp,0x18(%r10) |
139 | #endif | 139 | #endif |
140 | 140 | ||
141 | #ifdef CONFIG_64BIT | ||
142 | /* Get PDCE_PROC for monarch CPU. */ | ||
143 | #define MEM_PDC_LO 0x388 | ||
144 | #define MEM_PDC_HI 0x35C | ||
145 | ldw MEM_PDC_LO(%r0),%r3 | ||
146 | ldw MEM_PDC_HI(%r0),%r10 | ||
147 | depd %r10, 31, 32, %r3 /* move to upper word */ | ||
148 | #endif | ||
149 | |||
150 | |||
141 | #ifdef CONFIG_SMP | 151 | #ifdef CONFIG_SMP |
142 | /* Set the smp rendezvous address into page zero. | 152 | /* Set the smp rendezvous address into page zero. |
143 | ** It would be safer to do this in init_smp_config() but | 153 | ** It would be safer to do this in init_smp_config() but |
@@ -196,12 +206,6 @@ common_stext: | |||
196 | ** Someday, palo might not do this for the Monarch either. | 206 | ** Someday, palo might not do this for the Monarch either. |
197 | */ | 207 | */ |
198 | 2: | 208 | 2: |
199 | #define MEM_PDC_LO 0x388 | ||
200 | #define MEM_PDC_HI 0x35C | ||
201 | ldw MEM_PDC_LO(%r0),%r3 | ||
202 | ldw MEM_PDC_HI(%r0),%r6 | ||
203 | depd %r6, 31, 32, %r3 /* move to upper word */ | ||
204 | |||
205 | mfctl %cr30,%r6 /* PCX-W2 firmware bug */ | 209 | mfctl %cr30,%r6 /* PCX-W2 firmware bug */ |
206 | 210 | ||
207 | ldo PDC_PSW(%r0),%arg0 /* 21 */ | 211 | ldo PDC_PSW(%r0),%arg0 /* 21 */ |
@@ -268,6 +272,8 @@ $install_iva: | |||
268 | aligned_rfi: | 272 | aligned_rfi: |
269 | pcxt_ssm_bug | 273 | pcxt_ssm_bug |
270 | 274 | ||
275 | copy %r3, %arg0 /* PDCE_PROC for smp_callin() */ | ||
276 | |||
271 | rsm PSW_SM_QUIET,%r0 /* off troublesome PSW bits */ | 277 | rsm PSW_SM_QUIET,%r0 /* off troublesome PSW bits */ |
272 | /* Don't need NOPs, have 8 compliant insn before rfi */ | 278 | /* Don't need NOPs, have 8 compliant insn before rfi */ |
273 | 279 | ||
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S index 2d40c4ff3f69..67b0f7532e83 100644 --- a/arch/parisc/kernel/pacache.S +++ b/arch/parisc/kernel/pacache.S | |||
@@ -1110,6 +1110,28 @@ ENTRY_CFI(flush_kernel_dcache_range_asm) | |||
1110 | .procend | 1110 | .procend |
1111 | ENDPROC_CFI(flush_kernel_dcache_range_asm) | 1111 | ENDPROC_CFI(flush_kernel_dcache_range_asm) |
1112 | 1112 | ||
1113 | ENTRY_CFI(purge_kernel_dcache_range_asm) | ||
1114 | .proc | ||
1115 | .callinfo NO_CALLS | ||
1116 | .entry | ||
1117 | |||
1118 | ldil L%dcache_stride, %r1 | ||
1119 | ldw R%dcache_stride(%r1), %r23 | ||
1120 | ldo -1(%r23), %r21 | ||
1121 | ANDCM %r26, %r21, %r26 | ||
1122 | |||
1123 | 1: cmpb,COND(<<),n %r26, %r25,1b | ||
1124 | pdc,m %r23(%r26) | ||
1125 | |||
1126 | sync | ||
1127 | syncdma | ||
1128 | bv %r0(%r2) | ||
1129 | nop | ||
1130 | .exit | ||
1131 | |||
1132 | .procend | ||
1133 | ENDPROC_CFI(purge_kernel_dcache_range_asm) | ||
1134 | |||
1113 | ENTRY_CFI(flush_user_icache_range_asm) | 1135 | ENTRY_CFI(flush_user_icache_range_asm) |
1114 | .proc | 1136 | .proc |
1115 | .callinfo NO_CALLS | 1137 | .callinfo NO_CALLS |
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index 30c28ab14540..4065b5e48c9d 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c | |||
@@ -292,10 +292,15 @@ smp_cpu_init(int cpunum) | |||
292 | * Slaves start using C here. Indirectly called from smp_slave_stext. | 292 | * Slaves start using C here. Indirectly called from smp_slave_stext. |
293 | * Do what start_kernel() and main() do for boot strap processor (aka monarch) | 293 | * Do what start_kernel() and main() do for boot strap processor (aka monarch) |
294 | */ | 294 | */ |
295 | void __init smp_callin(void) | 295 | void __init smp_callin(unsigned long pdce_proc) |
296 | { | 296 | { |
297 | int slave_id = cpu_now_booting; | 297 | int slave_id = cpu_now_booting; |
298 | 298 | ||
299 | #ifdef CONFIG_64BIT | ||
300 | WARN_ON(((unsigned long)(PAGE0->mem_pdc_hi) << 32 | ||
301 | | PAGE0->mem_pdc) != pdce_proc); | ||
302 | #endif | ||
303 | |||
299 | smp_cpu_init(slave_id); | 304 | smp_cpu_init(slave_id); |
300 | preempt_disable(); | 305 | preempt_disable(); |
301 | 306 | ||
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index 4b8fd6dc22da..f7e684560186 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c | |||
@@ -76,10 +76,10 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id) | |||
76 | next_tick = cpuinfo->it_value; | 76 | next_tick = cpuinfo->it_value; |
77 | 77 | ||
78 | /* Calculate how many ticks have elapsed. */ | 78 | /* Calculate how many ticks have elapsed. */ |
79 | now = mfctl(16); | ||
79 | do { | 80 | do { |
80 | ++ticks_elapsed; | 81 | ++ticks_elapsed; |
81 | next_tick += cpt; | 82 | next_tick += cpt; |
82 | now = mfctl(16); | ||
83 | } while (next_tick - now > cpt); | 83 | } while (next_tick - now > cpt); |
84 | 84 | ||
85 | /* Store (in CR16 cycles) up to when we are accounting right now. */ | 85 | /* Store (in CR16 cycles) up to when we are accounting right now. */ |
@@ -103,16 +103,17 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id) | |||
103 | * if one or the other wrapped. If "now" is "bigger" we'll end up | 103 | * if one or the other wrapped. If "now" is "bigger" we'll end up |
104 | * with a very large unsigned number. | 104 | * with a very large unsigned number. |
105 | */ | 105 | */ |
106 | while (next_tick - mfctl(16) > cpt) | 106 | now = mfctl(16); |
107 | while (next_tick - now > cpt) | ||
107 | next_tick += cpt; | 108 | next_tick += cpt; |
108 | 109 | ||
109 | /* Program the IT when to deliver the next interrupt. | 110 | /* Program the IT when to deliver the next interrupt. |
110 | * Only bottom 32-bits of next_tick are writable in CR16! | 111 | * Only bottom 32-bits of next_tick are writable in CR16! |
111 | * Timer interrupt will be delivered at least a few hundred cycles | 112 | * Timer interrupt will be delivered at least a few hundred cycles |
112 | * after the IT fires, so if we are too close (<= 500 cycles) to the | 113 | * after the IT fires, so if we are too close (<= 8000 cycles) to the |
113 | * next cycle, simply skip it. | 114 | * next cycle, simply skip it. |
114 | */ | 115 | */ |
115 | if (next_tick - mfctl(16) <= 500) | 116 | if (next_tick - now <= 8000) |
116 | next_tick += cpt; | 117 | next_tick += cpt; |
117 | mtctl(next_tick, 16); | 118 | mtctl(next_tick, 16); |
118 | 119 | ||
@@ -248,7 +249,7 @@ static int __init init_cr16_clocksource(void) | |||
248 | * different sockets, so mark them unstable and lower rating on | 249 | * different sockets, so mark them unstable and lower rating on |
249 | * multi-socket SMP systems. | 250 | * multi-socket SMP systems. |
250 | */ | 251 | */ |
251 | if (num_online_cpus() > 1) { | 252 | if (num_online_cpus() > 1 && !running_on_qemu) { |
252 | int cpu; | 253 | int cpu; |
253 | unsigned long cpu0_loc; | 254 | unsigned long cpu0_loc; |
254 | cpu0_loc = per_cpu(cpu_data, 0).cpu_loc; | 255 | cpu0_loc = per_cpu(cpu_data, 0).cpu_loc; |
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 48f41399fc0b..cab32ee824d2 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c | |||
@@ -629,7 +629,12 @@ void __init mem_init(void) | |||
629 | #endif | 629 | #endif |
630 | 630 | ||
631 | mem_init_print_info(NULL); | 631 | mem_init_print_info(NULL); |
632 | #ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */ | 632 | |
633 | #if 0 | ||
634 | /* | ||
635 | * Do not expose the virtual kernel memory layout to userspace. | ||
636 | * But keep code for debugging purposes. | ||
637 | */ | ||
633 | printk("virtual kernel memory layout:\n" | 638 | printk("virtual kernel memory layout:\n" |
634 | " vmalloc : 0x%px - 0x%px (%4ld MB)\n" | 639 | " vmalloc : 0x%px - 0x%px (%4ld MB)\n" |
635 | " memory : 0x%px - 0x%px (%4ld MB)\n" | 640 | " memory : 0x%px - 0x%px (%4ld MB)\n" |
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index ef6549e57157..26d5d2a5b8e9 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile | |||
@@ -101,7 +101,8 @@ $(addprefix $(obj)/,$(zlib-y)): \ | |||
101 | libfdt := fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c | 101 | libfdt := fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c |
102 | libfdtheader := fdt.h libfdt.h libfdt_internal.h | 102 | libfdtheader := fdt.h libfdt.h libfdt_internal.h |
103 | 103 | ||
104 | $(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o opal.o): \ | 104 | $(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o opal.o \ |
105 | treeboot-akebono.o treeboot-currituck.o treeboot-iss4xx.o): \ | ||
105 | $(addprefix $(obj)/,$(libfdtheader)) | 106 | $(addprefix $(obj)/,$(libfdtheader)) |
106 | 107 | ||
107 | src-wlib-y := string.S crt0.S stdio.c decompress.c main.c \ | 108 | src-wlib-y := string.S crt0.S stdio.c decompress.c main.c \ |
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index d22c41c26bb3..acf4b2e0530c 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c | |||
@@ -874,7 +874,6 @@ struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = { | |||
874 | .mmu = 0, | 874 | .mmu = 0, |
875 | .hash_ext = 0, | 875 | .hash_ext = 0, |
876 | .radix_ext = 0, | 876 | .radix_ext = 0, |
877 | .byte22 = 0, | ||
878 | }, | 877 | }, |
879 | 878 | ||
880 | /* option vector 6: IBM PAPR hints */ | 879 | /* option vector 6: IBM PAPR hints */ |
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index 0c854816e653..5cb4e4687107 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c | |||
@@ -195,6 +195,12 @@ static void kvmppc_pte_free(pte_t *ptep) | |||
195 | kmem_cache_free(kvm_pte_cache, ptep); | 195 | kmem_cache_free(kvm_pte_cache, ptep); |
196 | } | 196 | } |
197 | 197 | ||
198 | /* Like pmd_huge() and pmd_large(), but works regardless of config options */ | ||
199 | static inline int pmd_is_leaf(pmd_t pmd) | ||
200 | { | ||
201 | return !!(pmd_val(pmd) & _PAGE_PTE); | ||
202 | } | ||
203 | |||
198 | static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa, | 204 | static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa, |
199 | unsigned int level, unsigned long mmu_seq) | 205 | unsigned int level, unsigned long mmu_seq) |
200 | { | 206 | { |
@@ -219,7 +225,7 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa, | |||
219 | else | 225 | else |
220 | new_pmd = pmd_alloc_one(kvm->mm, gpa); | 226 | new_pmd = pmd_alloc_one(kvm->mm, gpa); |
221 | 227 | ||
222 | if (level == 0 && !(pmd && pmd_present(*pmd))) | 228 | if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd))) |
223 | new_ptep = kvmppc_pte_alloc(); | 229 | new_ptep = kvmppc_pte_alloc(); |
224 | 230 | ||
225 | /* Check if we might have been invalidated; let the guest retry if so */ | 231 | /* Check if we might have been invalidated; let the guest retry if so */ |
@@ -244,12 +250,30 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa, | |||
244 | new_pmd = NULL; | 250 | new_pmd = NULL; |
245 | } | 251 | } |
246 | pmd = pmd_offset(pud, gpa); | 252 | pmd = pmd_offset(pud, gpa); |
247 | if (pmd_large(*pmd)) { | 253 | if (pmd_is_leaf(*pmd)) { |
248 | /* Someone else has instantiated a large page here; retry */ | 254 | unsigned long lgpa = gpa & PMD_MASK; |
249 | ret = -EAGAIN; | 255 | |
250 | goto out_unlock; | 256 | /* |
251 | } | 257 | * If we raced with another CPU which has just put |
252 | if (level == 1 && !pmd_none(*pmd)) { | 258 | * a 2MB pte in after we saw a pte page, try again. |
259 | */ | ||
260 | if (level == 0 && !new_ptep) { | ||
261 | ret = -EAGAIN; | ||
262 | goto out_unlock; | ||
263 | } | ||
264 | /* Valid 2MB page here already, remove it */ | ||
265 | old = kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd), | ||
266 | ~0UL, 0, lgpa, PMD_SHIFT); | ||
267 | kvmppc_radix_tlbie_page(kvm, lgpa, PMD_SHIFT); | ||
268 | if (old & _PAGE_DIRTY) { | ||
269 | unsigned long gfn = lgpa >> PAGE_SHIFT; | ||
270 | struct kvm_memory_slot *memslot; | ||
271 | memslot = gfn_to_memslot(kvm, gfn); | ||
272 | if (memslot && memslot->dirty_bitmap) | ||
273 | kvmppc_update_dirty_map(memslot, | ||
274 | gfn, PMD_SIZE); | ||
275 | } | ||
276 | } else if (level == 1 && !pmd_none(*pmd)) { | ||
253 | /* | 277 | /* |
254 | * There's a page table page here, but we wanted | 278 | * There's a page table page here, but we wanted |
255 | * to install a large page. Tell the caller and let | 279 | * to install a large page. Tell the caller and let |
@@ -412,28 +436,24 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
412 | } else { | 436 | } else { |
413 | page = pages[0]; | 437 | page = pages[0]; |
414 | pfn = page_to_pfn(page); | 438 | pfn = page_to_pfn(page); |
415 | if (PageHuge(page)) { | 439 | if (PageCompound(page)) { |
416 | page = compound_head(page); | 440 | pte_size <<= compound_order(compound_head(page)); |
417 | pte_size <<= compound_order(page); | ||
418 | /* See if we can insert a 2MB large-page PTE here */ | 441 | /* See if we can insert a 2MB large-page PTE here */ |
419 | if (pte_size >= PMD_SIZE && | 442 | if (pte_size >= PMD_SIZE && |
420 | (gpa & PMD_MASK & PAGE_MASK) == | 443 | (gpa & (PMD_SIZE - PAGE_SIZE)) == |
421 | (hva & PMD_MASK & PAGE_MASK)) { | 444 | (hva & (PMD_SIZE - PAGE_SIZE))) { |
422 | level = 1; | 445 | level = 1; |
423 | pfn &= ~((PMD_SIZE >> PAGE_SHIFT) - 1); | 446 | pfn &= ~((PMD_SIZE >> PAGE_SHIFT) - 1); |
424 | } | 447 | } |
425 | } | 448 | } |
426 | /* See if we can provide write access */ | 449 | /* See if we can provide write access */ |
427 | if (writing) { | 450 | if (writing) { |
428 | /* | ||
429 | * We assume gup_fast has set dirty on the host PTE. | ||
430 | */ | ||
431 | pgflags |= _PAGE_WRITE; | 451 | pgflags |= _PAGE_WRITE; |
432 | } else { | 452 | } else { |
433 | local_irq_save(flags); | 453 | local_irq_save(flags); |
434 | ptep = find_current_mm_pte(current->mm->pgd, | 454 | ptep = find_current_mm_pte(current->mm->pgd, |
435 | hva, NULL, NULL); | 455 | hva, NULL, NULL); |
436 | if (ptep && pte_write(*ptep) && pte_dirty(*ptep)) | 456 | if (ptep && pte_write(*ptep)) |
437 | pgflags |= _PAGE_WRITE; | 457 | pgflags |= _PAGE_WRITE; |
438 | local_irq_restore(flags); | 458 | local_irq_restore(flags); |
439 | } | 459 | } |
@@ -459,18 +479,15 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
459 | pte = pfn_pte(pfn, __pgprot(pgflags)); | 479 | pte = pfn_pte(pfn, __pgprot(pgflags)); |
460 | ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq); | 480 | ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq); |
461 | } | 481 | } |
462 | if (ret == 0 || ret == -EAGAIN) | ||
463 | ret = RESUME_GUEST; | ||
464 | 482 | ||
465 | if (page) { | 483 | if (page) { |
466 | /* | 484 | if (!ret && (pgflags & _PAGE_WRITE)) |
467 | * We drop pages[0] here, not page because page might | 485 | set_page_dirty_lock(page); |
468 | * have been set to the head page of a compound, but | 486 | put_page(page); |
469 | * we have to drop the reference on the correct tail | ||
470 | * page to match the get inside gup() | ||
471 | */ | ||
472 | put_page(pages[0]); | ||
473 | } | 487 | } |
488 | |||
489 | if (ret == 0 || ret == -EAGAIN) | ||
490 | ret = RESUME_GUEST; | ||
474 | return ret; | 491 | return ret; |
475 | } | 492 | } |
476 | 493 | ||
@@ -644,7 +661,7 @@ void kvmppc_free_radix(struct kvm *kvm) | |||
644 | continue; | 661 | continue; |
645 | pmd = pmd_offset(pud, 0); | 662 | pmd = pmd_offset(pud, 0); |
646 | for (im = 0; im < PTRS_PER_PMD; ++im, ++pmd) { | 663 | for (im = 0; im < PTRS_PER_PMD; ++im, ++pmd) { |
647 | if (pmd_huge(*pmd)) { | 664 | if (pmd_is_leaf(*pmd)) { |
648 | pmd_clear(pmd); | 665 | pmd_clear(pmd); |
649 | continue; | 666 | continue; |
650 | } | 667 | } |
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 89707354c2ef..9cb9448163c4 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c | |||
@@ -2885,7 +2885,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) | |||
2885 | */ | 2885 | */ |
2886 | trace_hardirqs_on(); | 2886 | trace_hardirqs_on(); |
2887 | 2887 | ||
2888 | guest_enter(); | 2888 | guest_enter_irqoff(); |
2889 | 2889 | ||
2890 | srcu_idx = srcu_read_lock(&vc->kvm->srcu); | 2890 | srcu_idx = srcu_read_lock(&vc->kvm->srcu); |
2891 | 2891 | ||
@@ -2893,8 +2893,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) | |||
2893 | 2893 | ||
2894 | srcu_read_unlock(&vc->kvm->srcu, srcu_idx); | 2894 | srcu_read_unlock(&vc->kvm->srcu, srcu_idx); |
2895 | 2895 | ||
2896 | guest_exit(); | ||
2897 | |||
2898 | trace_hardirqs_off(); | 2896 | trace_hardirqs_off(); |
2899 | set_irq_happened(trap); | 2897 | set_irq_happened(trap); |
2900 | 2898 | ||
@@ -2937,6 +2935,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) | |||
2937 | kvmppc_set_host_core(pcpu); | 2935 | kvmppc_set_host_core(pcpu); |
2938 | 2936 | ||
2939 | local_irq_enable(); | 2937 | local_irq_enable(); |
2938 | guest_exit(); | ||
2940 | 2939 | ||
2941 | /* Let secondaries go back to the offline loop */ | 2940 | /* Let secondaries go back to the offline loop */ |
2942 | for (i = 0; i < controlled_threads; ++i) { | 2941 | for (i = 0; i < controlled_threads; ++i) { |
@@ -3656,15 +3655,17 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) | |||
3656 | goto up_out; | 3655 | goto up_out; |
3657 | 3656 | ||
3658 | psize = vma_kernel_pagesize(vma); | 3657 | psize = vma_kernel_pagesize(vma); |
3659 | porder = __ilog2(psize); | ||
3660 | 3658 | ||
3661 | up_read(¤t->mm->mmap_sem); | 3659 | up_read(¤t->mm->mmap_sem); |
3662 | 3660 | ||
3663 | /* We can handle 4k, 64k or 16M pages in the VRMA */ | 3661 | /* We can handle 4k, 64k or 16M pages in the VRMA */ |
3664 | err = -EINVAL; | 3662 | if (psize >= 0x1000000) |
3665 | if (!(psize == 0x1000 || psize == 0x10000 || | 3663 | psize = 0x1000000; |
3666 | psize == 0x1000000)) | 3664 | else if (psize >= 0x10000) |
3667 | goto out_srcu; | 3665 | psize = 0x10000; |
3666 | else | ||
3667 | psize = 0x1000; | ||
3668 | porder = __ilog2(psize); | ||
3668 | 3669 | ||
3669 | senc = slb_pgsize_encoding(psize); | 3670 | senc = slb_pgsize_encoding(psize); |
3670 | kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | | 3671 | kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | |
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index f31f357b8c5a..d33264697a31 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S | |||
@@ -320,7 +320,6 @@ kvm_novcpu_exit: | |||
320 | stw r12, STACK_SLOT_TRAP(r1) | 320 | stw r12, STACK_SLOT_TRAP(r1) |
321 | bl kvmhv_commence_exit | 321 | bl kvmhv_commence_exit |
322 | nop | 322 | nop |
323 | lwz r12, STACK_SLOT_TRAP(r1) | ||
324 | b kvmhv_switch_to_host | 323 | b kvmhv_switch_to_host |
325 | 324 | ||
326 | /* | 325 | /* |
@@ -1220,6 +1219,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) | |||
1220 | 1219 | ||
1221 | secondary_too_late: | 1220 | secondary_too_late: |
1222 | li r12, 0 | 1221 | li r12, 0 |
1222 | stw r12, STACK_SLOT_TRAP(r1) | ||
1223 | cmpdi r4, 0 | 1223 | cmpdi r4, 0 |
1224 | beq 11f | 1224 | beq 11f |
1225 | stw r12, VCPU_TRAP(r4) | 1225 | stw r12, VCPU_TRAP(r4) |
@@ -1558,12 +1558,12 @@ mc_cont: | |||
1558 | 3: stw r5,VCPU_SLB_MAX(r9) | 1558 | 3: stw r5,VCPU_SLB_MAX(r9) |
1559 | 1559 | ||
1560 | guest_bypass: | 1560 | guest_bypass: |
1561 | stw r12, STACK_SLOT_TRAP(r1) | ||
1561 | mr r3, r12 | 1562 | mr r3, r12 |
1562 | /* Increment exit count, poke other threads to exit */ | 1563 | /* Increment exit count, poke other threads to exit */ |
1563 | bl kvmhv_commence_exit | 1564 | bl kvmhv_commence_exit |
1564 | nop | 1565 | nop |
1565 | ld r9, HSTATE_KVM_VCPU(r13) | 1566 | ld r9, HSTATE_KVM_VCPU(r13) |
1566 | lwz r12, VCPU_TRAP(r9) | ||
1567 | 1567 | ||
1568 | /* Stop others sending VCPU interrupts to this physical CPU */ | 1568 | /* Stop others sending VCPU interrupts to this physical CPU */ |
1569 | li r0, -1 | 1569 | li r0, -1 |
@@ -1898,6 +1898,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1) | |||
1898 | * POWER7/POWER8 guest -> host partition switch code. | 1898 | * POWER7/POWER8 guest -> host partition switch code. |
1899 | * We don't have to lock against tlbies but we do | 1899 | * We don't have to lock against tlbies but we do |
1900 | * have to coordinate the hardware threads. | 1900 | * have to coordinate the hardware threads. |
1901 | * Here STACK_SLOT_TRAP(r1) contains the trap number. | ||
1901 | */ | 1902 | */ |
1902 | kvmhv_switch_to_host: | 1903 | kvmhv_switch_to_host: |
1903 | /* Secondary threads wait for primary to do partition switch */ | 1904 | /* Secondary threads wait for primary to do partition switch */ |
@@ -1950,12 +1951,12 @@ BEGIN_FTR_SECTION | |||
1950 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | 1951 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) |
1951 | 1952 | ||
1952 | /* If HMI, call kvmppc_realmode_hmi_handler() */ | 1953 | /* If HMI, call kvmppc_realmode_hmi_handler() */ |
1954 | lwz r12, STACK_SLOT_TRAP(r1) | ||
1953 | cmpwi r12, BOOK3S_INTERRUPT_HMI | 1955 | cmpwi r12, BOOK3S_INTERRUPT_HMI |
1954 | bne 27f | 1956 | bne 27f |
1955 | bl kvmppc_realmode_hmi_handler | 1957 | bl kvmppc_realmode_hmi_handler |
1956 | nop | 1958 | nop |
1957 | cmpdi r3, 0 | 1959 | cmpdi r3, 0 |
1958 | li r12, BOOK3S_INTERRUPT_HMI | ||
1959 | /* | 1960 | /* |
1960 | * At this point kvmppc_realmode_hmi_handler may have resync-ed | 1961 | * At this point kvmppc_realmode_hmi_handler may have resync-ed |
1961 | * the TB, and if it has, we must not subtract the guest timebase | 1962 | * the TB, and if it has, we must not subtract the guest timebase |
@@ -2008,10 +2009,8 @@ BEGIN_FTR_SECTION | |||
2008 | lwz r8, KVM_SPLIT_DO_RESTORE(r3) | 2009 | lwz r8, KVM_SPLIT_DO_RESTORE(r3) |
2009 | cmpwi r8, 0 | 2010 | cmpwi r8, 0 |
2010 | beq 47f | 2011 | beq 47f |
2011 | stw r12, STACK_SLOT_TRAP(r1) | ||
2012 | bl kvmhv_p9_restore_lpcr | 2012 | bl kvmhv_p9_restore_lpcr |
2013 | nop | 2013 | nop |
2014 | lwz r12, STACK_SLOT_TRAP(r1) | ||
2015 | b 48f | 2014 | b 48f |
2016 | 47: | 2015 | 47: |
2017 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) | 2016 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) |
@@ -2049,6 +2048,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX) | |||
2049 | li r0, KVM_GUEST_MODE_NONE | 2048 | li r0, KVM_GUEST_MODE_NONE |
2050 | stb r0, HSTATE_IN_GUEST(r13) | 2049 | stb r0, HSTATE_IN_GUEST(r13) |
2051 | 2050 | ||
2051 | lwz r12, STACK_SLOT_TRAP(r1) /* return trap # in r12 */ | ||
2052 | ld r0, SFS+PPC_LR_STKOFF(r1) | 2052 | ld r0, SFS+PPC_LR_STKOFF(r1) |
2053 | addi r1, r1, SFS | 2053 | addi r1, r1, SFS |
2054 | mtlr r0 | 2054 | mtlr r0 |
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 403e642c78f5..52c205373986 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c | |||
@@ -1345,7 +1345,7 @@ static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu, | |||
1345 | int kvmppc_handle_load128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu, | 1345 | int kvmppc_handle_load128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu, |
1346 | unsigned int rt, int is_default_endian) | 1346 | unsigned int rt, int is_default_endian) |
1347 | { | 1347 | { |
1348 | enum emulation_result emulated; | 1348 | enum emulation_result emulated = EMULATE_DONE; |
1349 | 1349 | ||
1350 | while (vcpu->arch.mmio_vmx_copy_nums) { | 1350 | while (vcpu->arch.mmio_vmx_copy_nums) { |
1351 | emulated = __kvmppc_handle_load(run, vcpu, rt, 8, | 1351 | emulated = __kvmppc_handle_load(run, vcpu, rt, 8, |
@@ -1608,7 +1608,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
1608 | 1608 | ||
1609 | kvm_sigset_deactivate(vcpu); | 1609 | kvm_sigset_deactivate(vcpu); |
1610 | 1610 | ||
1611 | #ifdef CONFIG_ALTIVEC | ||
1611 | out: | 1612 | out: |
1613 | #endif | ||
1612 | vcpu_put(vcpu); | 1614 | vcpu_put(vcpu); |
1613 | return r; | 1615 | return r; |
1614 | } | 1616 | } |
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index 0a34b0cec7b7..0ef3d9580e98 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c | |||
@@ -240,6 +240,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 | |||
240 | * goto out; | 240 | * goto out; |
241 | */ | 241 | */ |
242 | PPC_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)); | 242 | PPC_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)); |
243 | PPC_RLWINM(b2p_index, b2p_index, 0, 0, 31); | ||
243 | PPC_CMPLW(b2p_index, b2p[TMP_REG_1]); | 244 | PPC_CMPLW(b2p_index, b2p[TMP_REG_1]); |
244 | PPC_BCC(COND_GE, out); | 245 | PPC_BCC(COND_GE, out); |
245 | 246 | ||
diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h index c0319cbf1eec..5510366d169a 100644 --- a/arch/riscv/include/asm/barrier.h +++ b/arch/riscv/include/asm/barrier.h | |||
@@ -34,9 +34,9 @@ | |||
34 | #define wmb() RISCV_FENCE(ow,ow) | 34 | #define wmb() RISCV_FENCE(ow,ow) |
35 | 35 | ||
36 | /* These barriers do not need to enforce ordering on devices, just memory. */ | 36 | /* These barriers do not need to enforce ordering on devices, just memory. */ |
37 | #define smp_mb() RISCV_FENCE(rw,rw) | 37 | #define __smp_mb() RISCV_FENCE(rw,rw) |
38 | #define smp_rmb() RISCV_FENCE(r,r) | 38 | #define __smp_rmb() RISCV_FENCE(r,r) |
39 | #define smp_wmb() RISCV_FENCE(w,w) | 39 | #define __smp_wmb() RISCV_FENCE(w,w) |
40 | 40 | ||
41 | /* | 41 | /* |
42 | * This is a very specific barrier: it's currently only used in two places in | 42 | * This is a very specific barrier: it's currently only used in two places in |
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index 65154eaa3714..6c8ce15cde7b 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h | |||
@@ -63,6 +63,7 @@ static inline int init_new_context(struct task_struct *tsk, | |||
63 | _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT; | 63 | _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT; |
64 | /* pgd_alloc() did not account this pmd */ | 64 | /* pgd_alloc() did not account this pmd */ |
65 | mm_inc_nr_pmds(mm); | 65 | mm_inc_nr_pmds(mm); |
66 | mm_inc_nr_puds(mm); | ||
66 | } | 67 | } |
67 | crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm)); | 68 | crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm)); |
68 | return 0; | 69 | return 0; |
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 13a133a6015c..a5621ea6d123 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <asm/processor.h> | 14 | #include <asm/processor.h> |
15 | #include <asm/cache.h> | 15 | #include <asm/cache.h> |
16 | #include <asm/ctl_reg.h> | 16 | #include <asm/ctl_reg.h> |
17 | #include <asm/dwarf.h> | ||
17 | #include <asm/errno.h> | 18 | #include <asm/errno.h> |
18 | #include <asm/ptrace.h> | 19 | #include <asm/ptrace.h> |
19 | #include <asm/thread_info.h> | 20 | #include <asm/thread_info.h> |
@@ -230,7 +231,7 @@ _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART) | |||
230 | .hidden \name | 231 | .hidden \name |
231 | .type \name,@function | 232 | .type \name,@function |
232 | \name: | 233 | \name: |
233 | .cfi_startproc | 234 | CFI_STARTPROC |
234 | #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES | 235 | #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES |
235 | exrl 0,0f | 236 | exrl 0,0f |
236 | #else | 237 | #else |
@@ -239,7 +240,7 @@ _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART) | |||
239 | #endif | 240 | #endif |
240 | j . | 241 | j . |
241 | 0: br \reg | 242 | 0: br \reg |
242 | .cfi_endproc | 243 | CFI_ENDPROC |
243 | .endm | 244 | .endm |
244 | 245 | ||
245 | GEN_BR_THUNK __s390x_indirect_jump_r1use_r9,%r9,%r1 | 246 | GEN_BR_THUNK __s390x_indirect_jump_r1use_r9,%r9,%r1 |
@@ -426,13 +427,13 @@ ENTRY(system_call) | |||
426 | UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER | 427 | UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER |
427 | BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP | 428 | BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP |
428 | stmg %r0,%r7,__PT_R0(%r11) | 429 | stmg %r0,%r7,__PT_R0(%r11) |
429 | # clear user controlled register to prevent speculative use | ||
430 | xgr %r0,%r0 | ||
431 | mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC | 430 | mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC |
432 | mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW | 431 | mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW |
433 | mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC | 432 | mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC |
434 | stg %r14,__PT_FLAGS(%r11) | 433 | stg %r14,__PT_FLAGS(%r11) |
435 | .Lsysc_do_svc: | 434 | .Lsysc_do_svc: |
435 | # clear user controlled register to prevent speculative use | ||
436 | xgr %r0,%r0 | ||
436 | # load address of system call table | 437 | # load address of system call table |
437 | lg %r10,__THREAD_sysc_table(%r13,%r12) | 438 | lg %r10,__THREAD_sysc_table(%r13,%r12) |
438 | llgh %r8,__PT_INT_CODE+2(%r11) | 439 | llgh %r8,__PT_INT_CODE+2(%r11) |
@@ -1439,6 +1440,7 @@ cleanup_critical: | |||
1439 | stg %r15,__LC_SYSTEM_TIMER | 1440 | stg %r15,__LC_SYSTEM_TIMER |
1440 | 0: # update accounting time stamp | 1441 | 0: # update accounting time stamp |
1441 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 1442 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
1443 | BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP | ||
1442 | # set up saved register r11 | 1444 | # set up saved register r11 |
1443 | lg %r15,__LC_KERNEL_STACK | 1445 | lg %r15,__LC_KERNEL_STACK |
1444 | la %r9,STACK_FRAME_OVERHEAD(%r15) | 1446 | la %r9,STACK_FRAME_OVERHEAD(%r15) |
diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c index 69d7fcf48158..9aff72d3abda 100644 --- a/arch/s390/kernel/nospec-branch.c +++ b/arch/s390/kernel/nospec-branch.c | |||
@@ -2,8 +2,8 @@ | |||
2 | #include <linux/module.h> | 2 | #include <linux/module.h> |
3 | #include <asm/nospec-branch.h> | 3 | #include <asm/nospec-branch.h> |
4 | 4 | ||
5 | int nospec_call_disable = IS_ENABLED(EXPOLINE_OFF); | 5 | int nospec_call_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF); |
6 | int nospec_return_disable = !IS_ENABLED(EXPOLINE_FULL); | 6 | int nospec_return_disable = !IS_ENABLED(CONFIG_EXPOLINE_FULL); |
7 | 7 | ||
8 | static int __init nospectre_v2_setup_early(char *str) | 8 | static int __init nospectre_v2_setup_early(char *str) |
9 | { | 9 | { |
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index 9c7d70715862..07c6e81163bf 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c | |||
@@ -22,22 +22,6 @@ | |||
22 | #include "trace.h" | 22 | #include "trace.h" |
23 | #include "trace-s390.h" | 23 | #include "trace-s390.h" |
24 | 24 | ||
25 | |||
26 | static const intercept_handler_t instruction_handlers[256] = { | ||
27 | [0x01] = kvm_s390_handle_01, | ||
28 | [0x82] = kvm_s390_handle_lpsw, | ||
29 | [0x83] = kvm_s390_handle_diag, | ||
30 | [0xaa] = kvm_s390_handle_aa, | ||
31 | [0xae] = kvm_s390_handle_sigp, | ||
32 | [0xb2] = kvm_s390_handle_b2, | ||
33 | [0xb6] = kvm_s390_handle_stctl, | ||
34 | [0xb7] = kvm_s390_handle_lctl, | ||
35 | [0xb9] = kvm_s390_handle_b9, | ||
36 | [0xe3] = kvm_s390_handle_e3, | ||
37 | [0xe5] = kvm_s390_handle_e5, | ||
38 | [0xeb] = kvm_s390_handle_eb, | ||
39 | }; | ||
40 | |||
41 | u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu) | 25 | u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu) |
42 | { | 26 | { |
43 | struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block; | 27 | struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block; |
@@ -129,16 +113,39 @@ static int handle_validity(struct kvm_vcpu *vcpu) | |||
129 | 113 | ||
130 | static int handle_instruction(struct kvm_vcpu *vcpu) | 114 | static int handle_instruction(struct kvm_vcpu *vcpu) |
131 | { | 115 | { |
132 | intercept_handler_t handler; | ||
133 | |||
134 | vcpu->stat.exit_instruction++; | 116 | vcpu->stat.exit_instruction++; |
135 | trace_kvm_s390_intercept_instruction(vcpu, | 117 | trace_kvm_s390_intercept_instruction(vcpu, |
136 | vcpu->arch.sie_block->ipa, | 118 | vcpu->arch.sie_block->ipa, |
137 | vcpu->arch.sie_block->ipb); | 119 | vcpu->arch.sie_block->ipb); |
138 | handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8]; | 120 | |
139 | if (handler) | 121 | switch (vcpu->arch.sie_block->ipa >> 8) { |
140 | return handler(vcpu); | 122 | case 0x01: |
141 | return -EOPNOTSUPP; | 123 | return kvm_s390_handle_01(vcpu); |
124 | case 0x82: | ||
125 | return kvm_s390_handle_lpsw(vcpu); | ||
126 | case 0x83: | ||
127 | return kvm_s390_handle_diag(vcpu); | ||
128 | case 0xaa: | ||
129 | return kvm_s390_handle_aa(vcpu); | ||
130 | case 0xae: | ||
131 | return kvm_s390_handle_sigp(vcpu); | ||
132 | case 0xb2: | ||
133 | return kvm_s390_handle_b2(vcpu); | ||
134 | case 0xb6: | ||
135 | return kvm_s390_handle_stctl(vcpu); | ||
136 | case 0xb7: | ||
137 | return kvm_s390_handle_lctl(vcpu); | ||
138 | case 0xb9: | ||
139 | return kvm_s390_handle_b9(vcpu); | ||
140 | case 0xe3: | ||
141 | return kvm_s390_handle_e3(vcpu); | ||
142 | case 0xe5: | ||
143 | return kvm_s390_handle_e5(vcpu); | ||
144 | case 0xeb: | ||
145 | return kvm_s390_handle_eb(vcpu); | ||
146 | default: | ||
147 | return -EOPNOTSUPP; | ||
148 | } | ||
142 | } | 149 | } |
143 | 150 | ||
144 | static int inject_prog_on_prog_intercept(struct kvm_vcpu *vcpu) | 151 | static int inject_prog_on_prog_intercept(struct kvm_vcpu *vcpu) |
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index aabf46f5f883..b04616b57a94 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c | |||
@@ -169,8 +169,15 @@ static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu) | |||
169 | 169 | ||
170 | static int ckc_irq_pending(struct kvm_vcpu *vcpu) | 170 | static int ckc_irq_pending(struct kvm_vcpu *vcpu) |
171 | { | 171 | { |
172 | if (vcpu->arch.sie_block->ckc >= kvm_s390_get_tod_clock_fast(vcpu->kvm)) | 172 | const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm); |
173 | const u64 ckc = vcpu->arch.sie_block->ckc; | ||
174 | |||
175 | if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) { | ||
176 | if ((s64)ckc >= (s64)now) | ||
177 | return 0; | ||
178 | } else if (ckc >= now) { | ||
173 | return 0; | 179 | return 0; |
180 | } | ||
174 | return ckc_interrupts_enabled(vcpu); | 181 | return ckc_interrupts_enabled(vcpu); |
175 | } | 182 | } |
176 | 183 | ||
@@ -187,12 +194,6 @@ static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu) | |||
187 | return kvm_s390_get_cpu_timer(vcpu) >> 63; | 194 | return kvm_s390_get_cpu_timer(vcpu) >> 63; |
188 | } | 195 | } |
189 | 196 | ||
190 | static inline int is_ioirq(unsigned long irq_type) | ||
191 | { | ||
192 | return ((irq_type >= IRQ_PEND_IO_ISC_7) && | ||
193 | (irq_type <= IRQ_PEND_IO_ISC_0)); | ||
194 | } | ||
195 | |||
196 | static uint64_t isc_to_isc_bits(int isc) | 197 | static uint64_t isc_to_isc_bits(int isc) |
197 | { | 198 | { |
198 | return (0x80 >> isc) << 24; | 199 | return (0x80 >> isc) << 24; |
@@ -236,10 +237,15 @@ static inline int kvm_s390_gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gis | |||
236 | return test_and_clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa); | 237 | return test_and_clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa); |
237 | } | 238 | } |
238 | 239 | ||
239 | static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu) | 240 | static inline unsigned long pending_irqs_no_gisa(struct kvm_vcpu *vcpu) |
240 | { | 241 | { |
241 | return vcpu->kvm->arch.float_int.pending_irqs | | 242 | return vcpu->kvm->arch.float_int.pending_irqs | |
242 | vcpu->arch.local_int.pending_irqs | | 243 | vcpu->arch.local_int.pending_irqs; |
244 | } | ||
245 | |||
246 | static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu) | ||
247 | { | ||
248 | return pending_irqs_no_gisa(vcpu) | | ||
243 | kvm_s390_gisa_get_ipm(vcpu->kvm->arch.gisa) << IRQ_PEND_IO_ISC_7; | 249 | kvm_s390_gisa_get_ipm(vcpu->kvm->arch.gisa) << IRQ_PEND_IO_ISC_7; |
244 | } | 250 | } |
245 | 251 | ||
@@ -337,7 +343,7 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu) | |||
337 | 343 | ||
338 | static void set_intercept_indicators_io(struct kvm_vcpu *vcpu) | 344 | static void set_intercept_indicators_io(struct kvm_vcpu *vcpu) |
339 | { | 345 | { |
340 | if (!(pending_irqs(vcpu) & IRQ_PEND_IO_MASK)) | 346 | if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_IO_MASK)) |
341 | return; | 347 | return; |
342 | else if (psw_ioint_disabled(vcpu)) | 348 | else if (psw_ioint_disabled(vcpu)) |
343 | kvm_s390_set_cpuflags(vcpu, CPUSTAT_IO_INT); | 349 | kvm_s390_set_cpuflags(vcpu, CPUSTAT_IO_INT); |
@@ -1011,24 +1017,6 @@ out: | |||
1011 | return rc; | 1017 | return rc; |
1012 | } | 1018 | } |
1013 | 1019 | ||
1014 | typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu); | ||
1015 | |||
1016 | static const deliver_irq_t deliver_irq_funcs[] = { | ||
1017 | [IRQ_PEND_MCHK_EX] = __deliver_machine_check, | ||
1018 | [IRQ_PEND_MCHK_REP] = __deliver_machine_check, | ||
1019 | [IRQ_PEND_PROG] = __deliver_prog, | ||
1020 | [IRQ_PEND_EXT_EMERGENCY] = __deliver_emergency_signal, | ||
1021 | [IRQ_PEND_EXT_EXTERNAL] = __deliver_external_call, | ||
1022 | [IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc, | ||
1023 | [IRQ_PEND_EXT_CPU_TIMER] = __deliver_cpu_timer, | ||
1024 | [IRQ_PEND_RESTART] = __deliver_restart, | ||
1025 | [IRQ_PEND_SET_PREFIX] = __deliver_set_prefix, | ||
1026 | [IRQ_PEND_PFAULT_INIT] = __deliver_pfault_init, | ||
1027 | [IRQ_PEND_EXT_SERVICE] = __deliver_service, | ||
1028 | [IRQ_PEND_PFAULT_DONE] = __deliver_pfault_done, | ||
1029 | [IRQ_PEND_VIRTIO] = __deliver_virtio, | ||
1030 | }; | ||
1031 | |||
1032 | /* Check whether an external call is pending (deliverable or not) */ | 1020 | /* Check whether an external call is pending (deliverable or not) */ |
1033 | int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu) | 1021 | int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu) |
1034 | { | 1022 | { |
@@ -1066,13 +1054,19 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) | |||
1066 | 1054 | ||
1067 | static u64 __calculate_sltime(struct kvm_vcpu *vcpu) | 1055 | static u64 __calculate_sltime(struct kvm_vcpu *vcpu) |
1068 | { | 1056 | { |
1069 | u64 now, cputm, sltime = 0; | 1057 | const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm); |
1058 | const u64 ckc = vcpu->arch.sie_block->ckc; | ||
1059 | u64 cputm, sltime = 0; | ||
1070 | 1060 | ||
1071 | if (ckc_interrupts_enabled(vcpu)) { | 1061 | if (ckc_interrupts_enabled(vcpu)) { |
1072 | now = kvm_s390_get_tod_clock_fast(vcpu->kvm); | 1062 | if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) { |
1073 | sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now); | 1063 | if ((s64)now < (s64)ckc) |
1074 | /* already expired or overflow? */ | 1064 | sltime = tod_to_ns((s64)ckc - (s64)now); |
1075 | if (!sltime || vcpu->arch.sie_block->ckc <= now) | 1065 | } else if (now < ckc) { |
1066 | sltime = tod_to_ns(ckc - now); | ||
1067 | } | ||
1068 | /* already expired */ | ||
1069 | if (!sltime) | ||
1076 | return 0; | 1070 | return 0; |
1077 | if (cpu_timer_interrupts_enabled(vcpu)) { | 1071 | if (cpu_timer_interrupts_enabled(vcpu)) { |
1078 | cputm = kvm_s390_get_cpu_timer(vcpu); | 1072 | cputm = kvm_s390_get_cpu_timer(vcpu); |
@@ -1192,7 +1186,6 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu) | |||
1192 | int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) | 1186 | int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) |
1193 | { | 1187 | { |
1194 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | 1188 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
1195 | deliver_irq_t func; | ||
1196 | int rc = 0; | 1189 | int rc = 0; |
1197 | unsigned long irq_type; | 1190 | unsigned long irq_type; |
1198 | unsigned long irqs; | 1191 | unsigned long irqs; |
@@ -1212,16 +1205,57 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) | |||
1212 | while ((irqs = deliverable_irqs(vcpu)) && !rc) { | 1205 | while ((irqs = deliverable_irqs(vcpu)) && !rc) { |
1213 | /* bits are in the reverse order of interrupt priority */ | 1206 | /* bits are in the reverse order of interrupt priority */ |
1214 | irq_type = find_last_bit(&irqs, IRQ_PEND_COUNT); | 1207 | irq_type = find_last_bit(&irqs, IRQ_PEND_COUNT); |
1215 | if (is_ioirq(irq_type)) { | 1208 | switch (irq_type) { |
1209 | case IRQ_PEND_IO_ISC_0: | ||
1210 | case IRQ_PEND_IO_ISC_1: | ||
1211 | case IRQ_PEND_IO_ISC_2: | ||
1212 | case IRQ_PEND_IO_ISC_3: | ||
1213 | case IRQ_PEND_IO_ISC_4: | ||
1214 | case IRQ_PEND_IO_ISC_5: | ||
1215 | case IRQ_PEND_IO_ISC_6: | ||
1216 | case IRQ_PEND_IO_ISC_7: | ||
1216 | rc = __deliver_io(vcpu, irq_type); | 1217 | rc = __deliver_io(vcpu, irq_type); |
1217 | } else { | 1218 | break; |
1218 | func = deliver_irq_funcs[irq_type]; | 1219 | case IRQ_PEND_MCHK_EX: |
1219 | if (!func) { | 1220 | case IRQ_PEND_MCHK_REP: |
1220 | WARN_ON_ONCE(func == NULL); | 1221 | rc = __deliver_machine_check(vcpu); |
1221 | clear_bit(irq_type, &li->pending_irqs); | 1222 | break; |
1222 | continue; | 1223 | case IRQ_PEND_PROG: |
1223 | } | 1224 | rc = __deliver_prog(vcpu); |
1224 | rc = func(vcpu); | 1225 | break; |
1226 | case IRQ_PEND_EXT_EMERGENCY: | ||
1227 | rc = __deliver_emergency_signal(vcpu); | ||
1228 | break; | ||
1229 | case IRQ_PEND_EXT_EXTERNAL: | ||
1230 | rc = __deliver_external_call(vcpu); | ||
1231 | break; | ||
1232 | case IRQ_PEND_EXT_CLOCK_COMP: | ||
1233 | rc = __deliver_ckc(vcpu); | ||
1234 | break; | ||
1235 | case IRQ_PEND_EXT_CPU_TIMER: | ||
1236 | rc = __deliver_cpu_timer(vcpu); | ||
1237 | break; | ||
1238 | case IRQ_PEND_RESTART: | ||
1239 | rc = __deliver_restart(vcpu); | ||
1240 | break; | ||
1241 | case IRQ_PEND_SET_PREFIX: | ||
1242 | rc = __deliver_set_prefix(vcpu); | ||
1243 | break; | ||
1244 | case IRQ_PEND_PFAULT_INIT: | ||
1245 | rc = __deliver_pfault_init(vcpu); | ||
1246 | break; | ||
1247 | case IRQ_PEND_EXT_SERVICE: | ||
1248 | rc = __deliver_service(vcpu); | ||
1249 | break; | ||
1250 | case IRQ_PEND_PFAULT_DONE: | ||
1251 | rc = __deliver_pfault_done(vcpu); | ||
1252 | break; | ||
1253 | case IRQ_PEND_VIRTIO: | ||
1254 | rc = __deliver_virtio(vcpu); | ||
1255 | break; | ||
1256 | default: | ||
1257 | WARN_ONCE(1, "Unknown pending irq type %ld", irq_type); | ||
1258 | clear_bit(irq_type, &li->pending_irqs); | ||
1225 | } | 1259 | } |
1226 | } | 1260 | } |
1227 | 1261 | ||
@@ -1701,7 +1735,8 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type) | |||
1701 | kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_STOP_INT); | 1735 | kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_STOP_INT); |
1702 | break; | 1736 | break; |
1703 | case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: | 1737 | case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: |
1704 | kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT); | 1738 | if (!(type & KVM_S390_INT_IO_AI_MASK && kvm->arch.gisa)) |
1739 | kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT); | ||
1705 | break; | 1740 | break; |
1706 | default: | 1741 | default: |
1707 | kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_EXT_INT); | 1742 | kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_EXT_INT); |
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index ba4c7092335a..339ac0964590 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
@@ -86,6 +86,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
86 | { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) }, | 86 | { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) }, |
87 | { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) }, | 87 | { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) }, |
88 | { "deliver_program_interruption", VCPU_STAT(deliver_program_int) }, | 88 | { "deliver_program_interruption", VCPU_STAT(deliver_program_int) }, |
89 | { "deliver_io_interrupt", VCPU_STAT(deliver_io_int) }, | ||
89 | { "exit_wait_state", VCPU_STAT(exit_wait_state) }, | 90 | { "exit_wait_state", VCPU_STAT(exit_wait_state) }, |
90 | { "instruction_epsw", VCPU_STAT(instruction_epsw) }, | 91 | { "instruction_epsw", VCPU_STAT(instruction_epsw) }, |
91 | { "instruction_gs", VCPU_STAT(instruction_gs) }, | 92 | { "instruction_gs", VCPU_STAT(instruction_gs) }, |
@@ -179,6 +180,28 @@ int kvm_arch_hardware_enable(void) | |||
179 | static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start, | 180 | static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start, |
180 | unsigned long end); | 181 | unsigned long end); |
181 | 182 | ||
183 | static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta) | ||
184 | { | ||
185 | u8 delta_idx = 0; | ||
186 | |||
187 | /* | ||
188 | * The TOD jumps by delta, we have to compensate this by adding | ||
189 | * -delta to the epoch. | ||
190 | */ | ||
191 | delta = -delta; | ||
192 | |||
193 | /* sign-extension - we're adding to signed values below */ | ||
194 | if ((s64)delta < 0) | ||
195 | delta_idx = -1; | ||
196 | |||
197 | scb->epoch += delta; | ||
198 | if (scb->ecd & ECD_MEF) { | ||
199 | scb->epdx += delta_idx; | ||
200 | if (scb->epoch < delta) | ||
201 | scb->epdx += 1; | ||
202 | } | ||
203 | } | ||
204 | |||
182 | /* | 205 | /* |
183 | * This callback is executed during stop_machine(). All CPUs are therefore | 206 | * This callback is executed during stop_machine(). All CPUs are therefore |
184 | * temporarily stopped. In order not to change guest behavior, we have to | 207 | * temporarily stopped. In order not to change guest behavior, we have to |
@@ -194,13 +217,17 @@ static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val, | |||
194 | unsigned long long *delta = v; | 217 | unsigned long long *delta = v; |
195 | 218 | ||
196 | list_for_each_entry(kvm, &vm_list, vm_list) { | 219 | list_for_each_entry(kvm, &vm_list, vm_list) { |
197 | kvm->arch.epoch -= *delta; | ||
198 | kvm_for_each_vcpu(i, vcpu, kvm) { | 220 | kvm_for_each_vcpu(i, vcpu, kvm) { |
199 | vcpu->arch.sie_block->epoch -= *delta; | 221 | kvm_clock_sync_scb(vcpu->arch.sie_block, *delta); |
222 | if (i == 0) { | ||
223 | kvm->arch.epoch = vcpu->arch.sie_block->epoch; | ||
224 | kvm->arch.epdx = vcpu->arch.sie_block->epdx; | ||
225 | } | ||
200 | if (vcpu->arch.cputm_enabled) | 226 | if (vcpu->arch.cputm_enabled) |
201 | vcpu->arch.cputm_start += *delta; | 227 | vcpu->arch.cputm_start += *delta; |
202 | if (vcpu->arch.vsie_block) | 228 | if (vcpu->arch.vsie_block) |
203 | vcpu->arch.vsie_block->epoch -= *delta; | 229 | kvm_clock_sync_scb(vcpu->arch.vsie_block, |
230 | *delta); | ||
204 | } | 231 | } |
205 | } | 232 | } |
206 | return NOTIFY_OK; | 233 | return NOTIFY_OK; |
@@ -902,12 +929,9 @@ static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr) | |||
902 | if (copy_from_user(>od, (void __user *)attr->addr, sizeof(gtod))) | 929 | if (copy_from_user(>od, (void __user *)attr->addr, sizeof(gtod))) |
903 | return -EFAULT; | 930 | return -EFAULT; |
904 | 931 | ||
905 | if (test_kvm_facility(kvm, 139)) | 932 | if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx) |
906 | kvm_s390_set_tod_clock_ext(kvm, >od); | ||
907 | else if (gtod.epoch_idx == 0) | ||
908 | kvm_s390_set_tod_clock(kvm, gtod.tod); | ||
909 | else | ||
910 | return -EINVAL; | 933 | return -EINVAL; |
934 | kvm_s390_set_tod_clock(kvm, >od); | ||
911 | 935 | ||
912 | VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx", | 936 | VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx", |
913 | gtod.epoch_idx, gtod.tod); | 937 | gtod.epoch_idx, gtod.tod); |
@@ -932,13 +956,14 @@ static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) | |||
932 | 956 | ||
933 | static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) | 957 | static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) |
934 | { | 958 | { |
935 | u64 gtod; | 959 | struct kvm_s390_vm_tod_clock gtod = { 0 }; |
936 | 960 | ||
937 | if (copy_from_user(>od, (void __user *)attr->addr, sizeof(gtod))) | 961 | if (copy_from_user(>od.tod, (void __user *)attr->addr, |
962 | sizeof(gtod.tod))) | ||
938 | return -EFAULT; | 963 | return -EFAULT; |
939 | 964 | ||
940 | kvm_s390_set_tod_clock(kvm, gtod); | 965 | kvm_s390_set_tod_clock(kvm, >od); |
941 | VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod); | 966 | VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod); |
942 | return 0; | 967 | return 0; |
943 | } | 968 | } |
944 | 969 | ||
@@ -2122,6 +2147,7 @@ static void sca_add_vcpu(struct kvm_vcpu *vcpu) | |||
2122 | /* we still need the basic sca for the ipte control */ | 2147 | /* we still need the basic sca for the ipte control */ |
2123 | vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32); | 2148 | vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32); |
2124 | vcpu->arch.sie_block->scaol = (__u32)(__u64)sca; | 2149 | vcpu->arch.sie_block->scaol = (__u32)(__u64)sca; |
2150 | return; | ||
2125 | } | 2151 | } |
2126 | read_lock(&vcpu->kvm->arch.sca_lock); | 2152 | read_lock(&vcpu->kvm->arch.sca_lock); |
2127 | if (vcpu->kvm->arch.use_esca) { | 2153 | if (vcpu->kvm->arch.use_esca) { |
@@ -2389,6 +2415,7 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) | |||
2389 | mutex_lock(&vcpu->kvm->lock); | 2415 | mutex_lock(&vcpu->kvm->lock); |
2390 | preempt_disable(); | 2416 | preempt_disable(); |
2391 | vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch; | 2417 | vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch; |
2418 | vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx; | ||
2392 | preempt_enable(); | 2419 | preempt_enable(); |
2393 | mutex_unlock(&vcpu->kvm->lock); | 2420 | mutex_unlock(&vcpu->kvm->lock); |
2394 | if (!kvm_is_ucontrol(vcpu->kvm)) { | 2421 | if (!kvm_is_ucontrol(vcpu->kvm)) { |
@@ -3021,8 +3048,8 @@ retry: | |||
3021 | return 0; | 3048 | return 0; |
3022 | } | 3049 | } |
3023 | 3050 | ||
3024 | void kvm_s390_set_tod_clock_ext(struct kvm *kvm, | 3051 | void kvm_s390_set_tod_clock(struct kvm *kvm, |
3025 | const struct kvm_s390_vm_tod_clock *gtod) | 3052 | const struct kvm_s390_vm_tod_clock *gtod) |
3026 | { | 3053 | { |
3027 | struct kvm_vcpu *vcpu; | 3054 | struct kvm_vcpu *vcpu; |
3028 | struct kvm_s390_tod_clock_ext htod; | 3055 | struct kvm_s390_tod_clock_ext htod; |
@@ -3034,10 +3061,12 @@ void kvm_s390_set_tod_clock_ext(struct kvm *kvm, | |||
3034 | get_tod_clock_ext((char *)&htod); | 3061 | get_tod_clock_ext((char *)&htod); |
3035 | 3062 | ||
3036 | kvm->arch.epoch = gtod->tod - htod.tod; | 3063 | kvm->arch.epoch = gtod->tod - htod.tod; |
3037 | kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx; | 3064 | kvm->arch.epdx = 0; |
3038 | 3065 | if (test_kvm_facility(kvm, 139)) { | |
3039 | if (kvm->arch.epoch > gtod->tod) | 3066 | kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx; |
3040 | kvm->arch.epdx -= 1; | 3067 | if (kvm->arch.epoch > gtod->tod) |
3068 | kvm->arch.epdx -= 1; | ||
3069 | } | ||
3041 | 3070 | ||
3042 | kvm_s390_vcpu_block_all(kvm); | 3071 | kvm_s390_vcpu_block_all(kvm); |
3043 | kvm_for_each_vcpu(i, vcpu, kvm) { | 3072 | kvm_for_each_vcpu(i, vcpu, kvm) { |
@@ -3050,22 +3079,6 @@ void kvm_s390_set_tod_clock_ext(struct kvm *kvm, | |||
3050 | mutex_unlock(&kvm->lock); | 3079 | mutex_unlock(&kvm->lock); |
3051 | } | 3080 | } |
3052 | 3081 | ||
3053 | void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod) | ||
3054 | { | ||
3055 | struct kvm_vcpu *vcpu; | ||
3056 | int i; | ||
3057 | |||
3058 | mutex_lock(&kvm->lock); | ||
3059 | preempt_disable(); | ||
3060 | kvm->arch.epoch = tod - get_tod_clock(); | ||
3061 | kvm_s390_vcpu_block_all(kvm); | ||
3062 | kvm_for_each_vcpu(i, vcpu, kvm) | ||
3063 | vcpu->arch.sie_block->epoch = kvm->arch.epoch; | ||
3064 | kvm_s390_vcpu_unblock_all(kvm); | ||
3065 | preempt_enable(); | ||
3066 | mutex_unlock(&kvm->lock); | ||
3067 | } | ||
3068 | |||
3069 | /** | 3082 | /** |
3070 | * kvm_arch_fault_in_page - fault-in guest page if necessary | 3083 | * kvm_arch_fault_in_page - fault-in guest page if necessary |
3071 | * @vcpu: The corresponding virtual cpu | 3084 | * @vcpu: The corresponding virtual cpu |
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index bd31b37b0e6f..f55ac0ef99ea 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h | |||
@@ -19,8 +19,6 @@ | |||
19 | #include <asm/processor.h> | 19 | #include <asm/processor.h> |
20 | #include <asm/sclp.h> | 20 | #include <asm/sclp.h> |
21 | 21 | ||
22 | typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu); | ||
23 | |||
24 | /* Transactional Memory Execution related macros */ | 22 | /* Transactional Memory Execution related macros */ |
25 | #define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & ECB_TE)) | 23 | #define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & ECB_TE)) |
26 | #define TDB_FORMAT1 1 | 24 | #define TDB_FORMAT1 1 |
@@ -283,9 +281,8 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu); | |||
283 | int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu); | 281 | int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu); |
284 | 282 | ||
285 | /* implemented in kvm-s390.c */ | 283 | /* implemented in kvm-s390.c */ |
286 | void kvm_s390_set_tod_clock_ext(struct kvm *kvm, | 284 | void kvm_s390_set_tod_clock(struct kvm *kvm, |
287 | const struct kvm_s390_vm_tod_clock *gtod); | 285 | const struct kvm_s390_vm_tod_clock *gtod); |
288 | void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod); | ||
289 | long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable); | 286 | long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable); |
290 | int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr); | 287 | int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr); |
291 | int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr); | 288 | int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr); |
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index c4c4e157c036..f0b4185158af 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c | |||
@@ -85,9 +85,10 @@ int kvm_s390_handle_e3(struct kvm_vcpu *vcpu) | |||
85 | /* Handle SCK (SET CLOCK) interception */ | 85 | /* Handle SCK (SET CLOCK) interception */ |
86 | static int handle_set_clock(struct kvm_vcpu *vcpu) | 86 | static int handle_set_clock(struct kvm_vcpu *vcpu) |
87 | { | 87 | { |
88 | struct kvm_s390_vm_tod_clock gtod = { 0 }; | ||
88 | int rc; | 89 | int rc; |
89 | u8 ar; | 90 | u8 ar; |
90 | u64 op2, val; | 91 | u64 op2; |
91 | 92 | ||
92 | vcpu->stat.instruction_sck++; | 93 | vcpu->stat.instruction_sck++; |
93 | 94 | ||
@@ -97,12 +98,12 @@ static int handle_set_clock(struct kvm_vcpu *vcpu) | |||
97 | op2 = kvm_s390_get_base_disp_s(vcpu, &ar); | 98 | op2 = kvm_s390_get_base_disp_s(vcpu, &ar); |
98 | if (op2 & 7) /* Operand must be on a doubleword boundary */ | 99 | if (op2 & 7) /* Operand must be on a doubleword boundary */ |
99 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 100 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
100 | rc = read_guest(vcpu, op2, ar, &val, sizeof(val)); | 101 | rc = read_guest(vcpu, op2, ar, >od.tod, sizeof(gtod.tod)); |
101 | if (rc) | 102 | if (rc) |
102 | return kvm_s390_inject_prog_cond(vcpu, rc); | 103 | return kvm_s390_inject_prog_cond(vcpu, rc); |
103 | 104 | ||
104 | VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val); | 105 | VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod); |
105 | kvm_s390_set_tod_clock(vcpu->kvm, val); | 106 | kvm_s390_set_tod_clock(vcpu->kvm, >od); |
106 | 107 | ||
107 | kvm_s390_set_psw_cc(vcpu, 0); | 108 | kvm_s390_set_psw_cc(vcpu, 0); |
108 | return 0; | 109 | return 0; |
@@ -795,55 +796,60 @@ out: | |||
795 | return rc; | 796 | return rc; |
796 | } | 797 | } |
797 | 798 | ||
798 | static const intercept_handler_t b2_handlers[256] = { | ||
799 | [0x02] = handle_stidp, | ||
800 | [0x04] = handle_set_clock, | ||
801 | [0x10] = handle_set_prefix, | ||
802 | [0x11] = handle_store_prefix, | ||
803 | [0x12] = handle_store_cpu_address, | ||
804 | [0x14] = kvm_s390_handle_vsie, | ||
805 | [0x21] = handle_ipte_interlock, | ||
806 | [0x29] = handle_iske, | ||
807 | [0x2a] = handle_rrbe, | ||
808 | [0x2b] = handle_sske, | ||
809 | [0x2c] = handle_test_block, | ||
810 | [0x30] = handle_io_inst, | ||
811 | [0x31] = handle_io_inst, | ||
812 | [0x32] = handle_io_inst, | ||
813 | [0x33] = handle_io_inst, | ||
814 | [0x34] = handle_io_inst, | ||
815 | [0x35] = handle_io_inst, | ||
816 | [0x36] = handle_io_inst, | ||
817 | [0x37] = handle_io_inst, | ||
818 | [0x38] = handle_io_inst, | ||
819 | [0x39] = handle_io_inst, | ||
820 | [0x3a] = handle_io_inst, | ||
821 | [0x3b] = handle_io_inst, | ||
822 | [0x3c] = handle_io_inst, | ||
823 | [0x50] = handle_ipte_interlock, | ||
824 | [0x56] = handle_sthyi, | ||
825 | [0x5f] = handle_io_inst, | ||
826 | [0x74] = handle_io_inst, | ||
827 | [0x76] = handle_io_inst, | ||
828 | [0x7d] = handle_stsi, | ||
829 | [0xb1] = handle_stfl, | ||
830 | [0xb2] = handle_lpswe, | ||
831 | }; | ||
832 | |||
833 | int kvm_s390_handle_b2(struct kvm_vcpu *vcpu) | 799 | int kvm_s390_handle_b2(struct kvm_vcpu *vcpu) |
834 | { | 800 | { |
835 | intercept_handler_t handler; | 801 | switch (vcpu->arch.sie_block->ipa & 0x00ff) { |
836 | 802 | case 0x02: | |
837 | /* | 803 | return handle_stidp(vcpu); |
838 | * A lot of B2 instructions are priviledged. Here we check for | 804 | case 0x04: |
839 | * the privileged ones, that we can handle in the kernel. | 805 | return handle_set_clock(vcpu); |
840 | * Anything else goes to userspace. | 806 | case 0x10: |
841 | */ | 807 | return handle_set_prefix(vcpu); |
842 | handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff]; | 808 | case 0x11: |
843 | if (handler) | 809 | return handle_store_prefix(vcpu); |
844 | return handler(vcpu); | 810 | case 0x12: |
845 | 811 | return handle_store_cpu_address(vcpu); | |
846 | return -EOPNOTSUPP; | 812 | case 0x14: |
813 | return kvm_s390_handle_vsie(vcpu); | ||
814 | case 0x21: | ||
815 | case 0x50: | ||
816 | return handle_ipte_interlock(vcpu); | ||
817 | case 0x29: | ||
818 | return handle_iske(vcpu); | ||
819 | case 0x2a: | ||
820 | return handle_rrbe(vcpu); | ||
821 | case 0x2b: | ||
822 | return handle_sske(vcpu); | ||
823 | case 0x2c: | ||
824 | return handle_test_block(vcpu); | ||
825 | case 0x30: | ||
826 | case 0x31: | ||
827 | case 0x32: | ||
828 | case 0x33: | ||
829 | case 0x34: | ||
830 | case 0x35: | ||
831 | case 0x36: | ||
832 | case 0x37: | ||
833 | case 0x38: | ||
834 | case 0x39: | ||
835 | case 0x3a: | ||
836 | case 0x3b: | ||
837 | case 0x3c: | ||
838 | case 0x5f: | ||
839 | case 0x74: | ||
840 | case 0x76: | ||
841 | return handle_io_inst(vcpu); | ||
842 | case 0x56: | ||
843 | return handle_sthyi(vcpu); | ||
844 | case 0x7d: | ||
845 | return handle_stsi(vcpu); | ||
846 | case 0xb1: | ||
847 | return handle_stfl(vcpu); | ||
848 | case 0xb2: | ||
849 | return handle_lpswe(vcpu); | ||
850 | default: | ||
851 | return -EOPNOTSUPP; | ||
852 | } | ||
847 | } | 853 | } |
848 | 854 | ||
849 | static int handle_epsw(struct kvm_vcpu *vcpu) | 855 | static int handle_epsw(struct kvm_vcpu *vcpu) |
@@ -1105,25 +1111,22 @@ static int handle_essa(struct kvm_vcpu *vcpu) | |||
1105 | return 0; | 1111 | return 0; |
1106 | } | 1112 | } |
1107 | 1113 | ||
1108 | static const intercept_handler_t b9_handlers[256] = { | ||
1109 | [0x8a] = handle_ipte_interlock, | ||
1110 | [0x8d] = handle_epsw, | ||
1111 | [0x8e] = handle_ipte_interlock, | ||
1112 | [0x8f] = handle_ipte_interlock, | ||
1113 | [0xab] = handle_essa, | ||
1114 | [0xaf] = handle_pfmf, | ||
1115 | }; | ||
1116 | |||
1117 | int kvm_s390_handle_b9(struct kvm_vcpu *vcpu) | 1114 | int kvm_s390_handle_b9(struct kvm_vcpu *vcpu) |
1118 | { | 1115 | { |
1119 | intercept_handler_t handler; | 1116 | switch (vcpu->arch.sie_block->ipa & 0x00ff) { |
1120 | 1117 | case 0x8a: | |
1121 | /* This is handled just as for the B2 instructions. */ | 1118 | case 0x8e: |
1122 | handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff]; | 1119 | case 0x8f: |
1123 | if (handler) | 1120 | return handle_ipte_interlock(vcpu); |
1124 | return handler(vcpu); | 1121 | case 0x8d: |
1125 | 1122 | return handle_epsw(vcpu); | |
1126 | return -EOPNOTSUPP; | 1123 | case 0xab: |
1124 | return handle_essa(vcpu); | ||
1125 | case 0xaf: | ||
1126 | return handle_pfmf(vcpu); | ||
1127 | default: | ||
1128 | return -EOPNOTSUPP; | ||
1129 | } | ||
1127 | } | 1130 | } |
1128 | 1131 | ||
1129 | int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu) | 1132 | int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu) |
@@ -1271,22 +1274,20 @@ static int handle_stctg(struct kvm_vcpu *vcpu) | |||
1271 | return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0; | 1274 | return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0; |
1272 | } | 1275 | } |
1273 | 1276 | ||
1274 | static const intercept_handler_t eb_handlers[256] = { | ||
1275 | [0x2f] = handle_lctlg, | ||
1276 | [0x25] = handle_stctg, | ||
1277 | [0x60] = handle_ri, | ||
1278 | [0x61] = handle_ri, | ||
1279 | [0x62] = handle_ri, | ||
1280 | }; | ||
1281 | |||
1282 | int kvm_s390_handle_eb(struct kvm_vcpu *vcpu) | 1277 | int kvm_s390_handle_eb(struct kvm_vcpu *vcpu) |
1283 | { | 1278 | { |
1284 | intercept_handler_t handler; | 1279 | switch (vcpu->arch.sie_block->ipb & 0x000000ff) { |
1285 | 1280 | case 0x25: | |
1286 | handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff]; | 1281 | return handle_stctg(vcpu); |
1287 | if (handler) | 1282 | case 0x2f: |
1288 | return handler(vcpu); | 1283 | return handle_lctlg(vcpu); |
1289 | return -EOPNOTSUPP; | 1284 | case 0x60: |
1285 | case 0x61: | ||
1286 | case 0x62: | ||
1287 | return handle_ri(vcpu); | ||
1288 | default: | ||
1289 | return -EOPNOTSUPP; | ||
1290 | } | ||
1290 | } | 1291 | } |
1291 | 1292 | ||
1292 | static int handle_tprot(struct kvm_vcpu *vcpu) | 1293 | static int handle_tprot(struct kvm_vcpu *vcpu) |
@@ -1346,10 +1347,12 @@ out_unlock: | |||
1346 | 1347 | ||
1347 | int kvm_s390_handle_e5(struct kvm_vcpu *vcpu) | 1348 | int kvm_s390_handle_e5(struct kvm_vcpu *vcpu) |
1348 | { | 1349 | { |
1349 | /* For e5xx... instructions we only handle TPROT */ | 1350 | switch (vcpu->arch.sie_block->ipa & 0x00ff) { |
1350 | if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01) | 1351 | case 0x01: |
1351 | return handle_tprot(vcpu); | 1352 | return handle_tprot(vcpu); |
1352 | return -EOPNOTSUPP; | 1353 | default: |
1354 | return -EOPNOTSUPP; | ||
1355 | } | ||
1353 | } | 1356 | } |
1354 | 1357 | ||
1355 | static int handle_sckpf(struct kvm_vcpu *vcpu) | 1358 | static int handle_sckpf(struct kvm_vcpu *vcpu) |
@@ -1380,17 +1383,14 @@ static int handle_ptff(struct kvm_vcpu *vcpu) | |||
1380 | return 0; | 1383 | return 0; |
1381 | } | 1384 | } |
1382 | 1385 | ||
1383 | static const intercept_handler_t x01_handlers[256] = { | ||
1384 | [0x04] = handle_ptff, | ||
1385 | [0x07] = handle_sckpf, | ||
1386 | }; | ||
1387 | |||
1388 | int kvm_s390_handle_01(struct kvm_vcpu *vcpu) | 1386 | int kvm_s390_handle_01(struct kvm_vcpu *vcpu) |
1389 | { | 1387 | { |
1390 | intercept_handler_t handler; | 1388 | switch (vcpu->arch.sie_block->ipa & 0x00ff) { |
1391 | 1389 | case 0x04: | |
1392 | handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff]; | 1390 | return handle_ptff(vcpu); |
1393 | if (handler) | 1391 | case 0x07: |
1394 | return handler(vcpu); | 1392 | return handle_sckpf(vcpu); |
1395 | return -EOPNOTSUPP; | 1393 | default: |
1394 | return -EOPNOTSUPP; | ||
1395 | } | ||
1396 | } | 1396 | } |
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index ec772700ff96..8961e3970901 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c | |||
@@ -821,6 +821,7 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) | |||
821 | { | 821 | { |
822 | struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; | 822 | struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; |
823 | struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; | 823 | struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; |
824 | int guest_bp_isolation; | ||
824 | int rc; | 825 | int rc; |
825 | 826 | ||
826 | handle_last_fault(vcpu, vsie_page); | 827 | handle_last_fault(vcpu, vsie_page); |
@@ -831,6 +832,20 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) | |||
831 | s390_handle_mcck(); | 832 | s390_handle_mcck(); |
832 | 833 | ||
833 | srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); | 834 | srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); |
835 | |||
836 | /* save current guest state of bp isolation override */ | ||
837 | guest_bp_isolation = test_thread_flag(TIF_ISOLATE_BP_GUEST); | ||
838 | |||
839 | /* | ||
840 | * The guest is running with BPBC, so we have to force it on for our | ||
841 | * nested guest. This is done by enabling BPBC globally, so the BPBC | ||
842 | * control in the SCB (which the nested guest can modify) is simply | ||
843 | * ignored. | ||
844 | */ | ||
845 | if (test_kvm_facility(vcpu->kvm, 82) && | ||
846 | vcpu->arch.sie_block->fpf & FPF_BPBC) | ||
847 | set_thread_flag(TIF_ISOLATE_BP_GUEST); | ||
848 | |||
834 | local_irq_disable(); | 849 | local_irq_disable(); |
835 | guest_enter_irqoff(); | 850 | guest_enter_irqoff(); |
836 | local_irq_enable(); | 851 | local_irq_enable(); |
@@ -840,6 +855,11 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) | |||
840 | local_irq_disable(); | 855 | local_irq_disable(); |
841 | guest_exit_irqoff(); | 856 | guest_exit_irqoff(); |
842 | local_irq_enable(); | 857 | local_irq_enable(); |
858 | |||
859 | /* restore guest state for bp isolation override */ | ||
860 | if (!guest_bp_isolation) | ||
861 | clear_thread_flag(TIF_ISOLATE_BP_GUEST); | ||
862 | |||
843 | vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); | 863 | vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); |
844 | 864 | ||
845 | if (rc == -EINTR) { | 865 | if (rc == -EINTR) { |
diff --git a/arch/sh/boot/dts/Makefile b/arch/sh/boot/dts/Makefile index 715def00a436..01d0f7fb14cc 100644 --- a/arch/sh/boot/dts/Makefile +++ b/arch/sh/boot/dts/Makefile | |||
@@ -1 +1,3 @@ | |||
1 | obj-$(CONFIG_USE_BUILTIN_DTB) += $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o | 1 | ifneq ($(CONFIG_BUILTIN_DTB_SOURCE),"") |
2 | obj-y += $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o | ||
3 | endif | ||
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c index 847ddffbf38a..b5cfab711651 100644 --- a/arch/sparc/mm/tlb.c +++ b/arch/sparc/mm/tlb.c | |||
@@ -163,13 +163,10 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr, | |||
163 | pte_unmap(pte); | 163 | pte_unmap(pte); |
164 | } | 164 | } |
165 | 165 | ||
166 | void set_pmd_at(struct mm_struct *mm, unsigned long addr, | ||
167 | pmd_t *pmdp, pmd_t pmd) | ||
168 | { | ||
169 | pmd_t orig = *pmdp; | ||
170 | |||
171 | *pmdp = pmd; | ||
172 | 166 | ||
167 | static void __set_pmd_acct(struct mm_struct *mm, unsigned long addr, | ||
168 | pmd_t orig, pmd_t pmd) | ||
169 | { | ||
173 | if (mm == &init_mm) | 170 | if (mm == &init_mm) |
174 | return; | 171 | return; |
175 | 172 | ||
@@ -219,6 +216,15 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr, | |||
219 | } | 216 | } |
220 | } | 217 | } |
221 | 218 | ||
219 | void set_pmd_at(struct mm_struct *mm, unsigned long addr, | ||
220 | pmd_t *pmdp, pmd_t pmd) | ||
221 | { | ||
222 | pmd_t orig = *pmdp; | ||
223 | |||
224 | *pmdp = pmd; | ||
225 | __set_pmd_acct(mm, addr, orig, pmd); | ||
226 | } | ||
227 | |||
222 | static inline pmd_t pmdp_establish(struct vm_area_struct *vma, | 228 | static inline pmd_t pmdp_establish(struct vm_area_struct *vma, |
223 | unsigned long address, pmd_t *pmdp, pmd_t pmd) | 229 | unsigned long address, pmd_t *pmdp, pmd_t pmd) |
224 | { | 230 | { |
@@ -227,6 +233,7 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma, | |||
227 | do { | 233 | do { |
228 | old = *pmdp; | 234 | old = *pmdp; |
229 | } while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd); | 235 | } while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd); |
236 | __set_pmd_acct(vma->vm_mm, address, old, pmd); | ||
230 | 237 | ||
231 | return old; | 238 | return old; |
232 | } | 239 | } |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index c1236b187824..0fa71a78ec99 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -430,6 +430,7 @@ config GOLDFISH | |||
430 | config RETPOLINE | 430 | config RETPOLINE |
431 | bool "Avoid speculative indirect branches in kernel" | 431 | bool "Avoid speculative indirect branches in kernel" |
432 | default y | 432 | default y |
433 | select STACK_VALIDATION if HAVE_STACK_VALIDATION | ||
433 | help | 434 | help |
434 | Compile kernel with the retpoline compiler options to guard against | 435 | Compile kernel with the retpoline compiler options to guard against |
435 | kernel-to-user data leaks by avoiding speculative indirect | 436 | kernel-to-user data leaks by avoiding speculative indirect |
@@ -2306,7 +2307,7 @@ choice | |||
2306 | it can be used to assist security vulnerability exploitation. | 2307 | it can be used to assist security vulnerability exploitation. |
2307 | 2308 | ||
2308 | This setting can be changed at boot time via the kernel command | 2309 | This setting can be changed at boot time via the kernel command |
2309 | line parameter vsyscall=[native|emulate|none]. | 2310 | line parameter vsyscall=[emulate|none]. |
2310 | 2311 | ||
2311 | On a system with recent enough glibc (2.14 or newer) and no | 2312 | On a system with recent enough glibc (2.14 or newer) and no |
2312 | static binaries, you can say None without a performance penalty | 2313 | static binaries, you can say None without a performance penalty |
@@ -2314,15 +2315,6 @@ choice | |||
2314 | 2315 | ||
2315 | If unsure, select "Emulate". | 2316 | If unsure, select "Emulate". |
2316 | 2317 | ||
2317 | config LEGACY_VSYSCALL_NATIVE | ||
2318 | bool "Native" | ||
2319 | help | ||
2320 | Actual executable code is located in the fixed vsyscall | ||
2321 | address mapping, implementing time() efficiently. Since | ||
2322 | this makes the mapping executable, it can be used during | ||
2323 | security vulnerability exploitation (traditionally as | ||
2324 | ROP gadgets). This configuration is not recommended. | ||
2325 | |||
2326 | config LEGACY_VSYSCALL_EMULATE | 2318 | config LEGACY_VSYSCALL_EMULATE |
2327 | bool "Emulate" | 2319 | bool "Emulate" |
2328 | help | 2320 | help |
diff --git a/arch/x86/Makefile b/arch/x86/Makefile index fad55160dcb9..498c1b812300 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile | |||
@@ -232,10 +232,9 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables | |||
232 | 232 | ||
233 | # Avoid indirect branches in kernel to deal with Spectre | 233 | # Avoid indirect branches in kernel to deal with Spectre |
234 | ifdef CONFIG_RETPOLINE | 234 | ifdef CONFIG_RETPOLINE |
235 | RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register) | 235 | ifneq ($(RETPOLINE_CFLAGS),) |
236 | ifneq ($(RETPOLINE_CFLAGS),) | 236 | KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE |
237 | KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE | 237 | endif |
238 | endif | ||
239 | endif | 238 | endif |
240 | 239 | ||
241 | archscripts: scripts_basic | 240 | archscripts: scripts_basic |
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index dce7092ab24a..be63330c5511 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h | |||
@@ -97,7 +97,7 @@ For 32-bit we have the following conventions - kernel is built with | |||
97 | 97 | ||
98 | #define SIZEOF_PTREGS 21*8 | 98 | #define SIZEOF_PTREGS 21*8 |
99 | 99 | ||
100 | .macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax | 100 | .macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0 |
101 | /* | 101 | /* |
102 | * Push registers and sanitize registers of values that a | 102 | * Push registers and sanitize registers of values that a |
103 | * speculation attack might otherwise want to exploit. The | 103 | * speculation attack might otherwise want to exploit. The |
@@ -105,32 +105,41 @@ For 32-bit we have the following conventions - kernel is built with | |||
105 | * could be put to use in a speculative execution gadget. | 105 | * could be put to use in a speculative execution gadget. |
106 | * Interleave XOR with PUSH for better uop scheduling: | 106 | * Interleave XOR with PUSH for better uop scheduling: |
107 | */ | 107 | */ |
108 | .if \save_ret | ||
109 | pushq %rsi /* pt_regs->si */ | ||
110 | movq 8(%rsp), %rsi /* temporarily store the return address in %rsi */ | ||
111 | movq %rdi, 8(%rsp) /* pt_regs->di (overwriting original return address) */ | ||
112 | .else | ||
108 | pushq %rdi /* pt_regs->di */ | 113 | pushq %rdi /* pt_regs->di */ |
109 | pushq %rsi /* pt_regs->si */ | 114 | pushq %rsi /* pt_regs->si */ |
115 | .endif | ||
110 | pushq \rdx /* pt_regs->dx */ | 116 | pushq \rdx /* pt_regs->dx */ |
111 | pushq %rcx /* pt_regs->cx */ | 117 | pushq %rcx /* pt_regs->cx */ |
112 | pushq \rax /* pt_regs->ax */ | 118 | pushq \rax /* pt_regs->ax */ |
113 | pushq %r8 /* pt_regs->r8 */ | 119 | pushq %r8 /* pt_regs->r8 */ |
114 | xorq %r8, %r8 /* nospec r8 */ | 120 | xorl %r8d, %r8d /* nospec r8 */ |
115 | pushq %r9 /* pt_regs->r9 */ | 121 | pushq %r9 /* pt_regs->r9 */ |
116 | xorq %r9, %r9 /* nospec r9 */ | 122 | xorl %r9d, %r9d /* nospec r9 */ |
117 | pushq %r10 /* pt_regs->r10 */ | 123 | pushq %r10 /* pt_regs->r10 */ |
118 | xorq %r10, %r10 /* nospec r10 */ | 124 | xorl %r10d, %r10d /* nospec r10 */ |
119 | pushq %r11 /* pt_regs->r11 */ | 125 | pushq %r11 /* pt_regs->r11 */ |
120 | xorq %r11, %r11 /* nospec r11*/ | 126 | xorl %r11d, %r11d /* nospec r11*/ |
121 | pushq %rbx /* pt_regs->rbx */ | 127 | pushq %rbx /* pt_regs->rbx */ |
122 | xorl %ebx, %ebx /* nospec rbx*/ | 128 | xorl %ebx, %ebx /* nospec rbx*/ |
123 | pushq %rbp /* pt_regs->rbp */ | 129 | pushq %rbp /* pt_regs->rbp */ |
124 | xorl %ebp, %ebp /* nospec rbp*/ | 130 | xorl %ebp, %ebp /* nospec rbp*/ |
125 | pushq %r12 /* pt_regs->r12 */ | 131 | pushq %r12 /* pt_regs->r12 */ |
126 | xorq %r12, %r12 /* nospec r12*/ | 132 | xorl %r12d, %r12d /* nospec r12*/ |
127 | pushq %r13 /* pt_regs->r13 */ | 133 | pushq %r13 /* pt_regs->r13 */ |
128 | xorq %r13, %r13 /* nospec r13*/ | 134 | xorl %r13d, %r13d /* nospec r13*/ |
129 | pushq %r14 /* pt_regs->r14 */ | 135 | pushq %r14 /* pt_regs->r14 */ |
130 | xorq %r14, %r14 /* nospec r14*/ | 136 | xorl %r14d, %r14d /* nospec r14*/ |
131 | pushq %r15 /* pt_regs->r15 */ | 137 | pushq %r15 /* pt_regs->r15 */ |
132 | xorq %r15, %r15 /* nospec r15*/ | 138 | xorl %r15d, %r15d /* nospec r15*/ |
133 | UNWIND_HINT_REGS | 139 | UNWIND_HINT_REGS |
140 | .if \save_ret | ||
141 | pushq %rsi /* return address on top of stack */ | ||
142 | .endif | ||
134 | .endm | 143 | .endm |
135 | 144 | ||
136 | .macro POP_REGS pop_rdi=1 skip_r11rcx=0 | 145 | .macro POP_REGS pop_rdi=1 skip_r11rcx=0 |
@@ -172,12 +181,7 @@ For 32-bit we have the following conventions - kernel is built with | |||
172 | */ | 181 | */ |
173 | .macro ENCODE_FRAME_POINTER ptregs_offset=0 | 182 | .macro ENCODE_FRAME_POINTER ptregs_offset=0 |
174 | #ifdef CONFIG_FRAME_POINTER | 183 | #ifdef CONFIG_FRAME_POINTER |
175 | .if \ptregs_offset | 184 | leaq 1+\ptregs_offset(%rsp), %rbp |
176 | leaq \ptregs_offset(%rsp), %rbp | ||
177 | .else | ||
178 | mov %rsp, %rbp | ||
179 | .endif | ||
180 | orq $0x1, %rbp | ||
181 | #endif | 185 | #endif |
182 | .endm | 186 | .endm |
183 | 187 | ||
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index 16c2c022540d..6ad064c8cf35 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S | |||
@@ -252,8 +252,7 @@ ENTRY(__switch_to_asm) | |||
252 | * exist, overwrite the RSB with entries which capture | 252 | * exist, overwrite the RSB with entries which capture |
253 | * speculative execution to prevent attack. | 253 | * speculative execution to prevent attack. |
254 | */ | 254 | */ |
255 | /* Clobbers %ebx */ | 255 | FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW |
256 | FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW | ||
257 | #endif | 256 | #endif |
258 | 257 | ||
259 | /* restore callee-saved registers */ | 258 | /* restore callee-saved registers */ |
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 8971bd64d515..805f52703ee3 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S | |||
@@ -55,7 +55,7 @@ END(native_usergs_sysret64) | |||
55 | 55 | ||
56 | .macro TRACE_IRQS_FLAGS flags:req | 56 | .macro TRACE_IRQS_FLAGS flags:req |
57 | #ifdef CONFIG_TRACE_IRQFLAGS | 57 | #ifdef CONFIG_TRACE_IRQFLAGS |
58 | bt $9, \flags /* interrupts off? */ | 58 | btl $9, \flags /* interrupts off? */ |
59 | jnc 1f | 59 | jnc 1f |
60 | TRACE_IRQS_ON | 60 | TRACE_IRQS_ON |
61 | 1: | 61 | 1: |
@@ -364,8 +364,7 @@ ENTRY(__switch_to_asm) | |||
364 | * exist, overwrite the RSB with entries which capture | 364 | * exist, overwrite the RSB with entries which capture |
365 | * speculative execution to prevent attack. | 365 | * speculative execution to prevent attack. |
366 | */ | 366 | */ |
367 | /* Clobbers %rbx */ | 367 | FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW |
368 | FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW | ||
369 | #endif | 368 | #endif |
370 | 369 | ||
371 | /* restore callee-saved registers */ | 370 | /* restore callee-saved registers */ |
@@ -449,9 +448,19 @@ END(irq_entries_start) | |||
449 | * | 448 | * |
450 | * The invariant is that, if irq_count != -1, then the IRQ stack is in use. | 449 | * The invariant is that, if irq_count != -1, then the IRQ stack is in use. |
451 | */ | 450 | */ |
452 | .macro ENTER_IRQ_STACK regs=1 old_rsp | 451 | .macro ENTER_IRQ_STACK regs=1 old_rsp save_ret=0 |
453 | DEBUG_ENTRY_ASSERT_IRQS_OFF | 452 | DEBUG_ENTRY_ASSERT_IRQS_OFF |
453 | |||
454 | .if \save_ret | ||
455 | /* | ||
456 | * If save_ret is set, the original stack contains one additional | ||
457 | * entry -- the return address. Therefore, move the address one | ||
458 | * entry below %rsp to \old_rsp. | ||
459 | */ | ||
460 | leaq 8(%rsp), \old_rsp | ||
461 | .else | ||
454 | movq %rsp, \old_rsp | 462 | movq %rsp, \old_rsp |
463 | .endif | ||
455 | 464 | ||
456 | .if \regs | 465 | .if \regs |
457 | UNWIND_HINT_REGS base=\old_rsp | 466 | UNWIND_HINT_REGS base=\old_rsp |
@@ -497,6 +506,15 @@ END(irq_entries_start) | |||
497 | .if \regs | 506 | .if \regs |
498 | UNWIND_HINT_REGS indirect=1 | 507 | UNWIND_HINT_REGS indirect=1 |
499 | .endif | 508 | .endif |
509 | |||
510 | .if \save_ret | ||
511 | /* | ||
512 | * Push the return address to the stack. This return address can | ||
513 | * be found at the "real" original RSP, which was offset by 8 at | ||
514 | * the beginning of this macro. | ||
515 | */ | ||
516 | pushq -8(\old_rsp) | ||
517 | .endif | ||
500 | .endm | 518 | .endm |
501 | 519 | ||
502 | /* | 520 | /* |
@@ -520,27 +538,65 @@ END(irq_entries_start) | |||
520 | .endm | 538 | .endm |
521 | 539 | ||
522 | /* | 540 | /* |
523 | * Interrupt entry/exit. | 541 | * Interrupt entry helper function. |
524 | * | ||
525 | * Interrupt entry points save only callee clobbered registers in fast path. | ||
526 | * | 542 | * |
527 | * Entry runs with interrupts off. | 543 | * Entry runs with interrupts off. Stack layout at entry: |
544 | * +----------------------------------------------------+ | ||
545 | * | regs->ss | | ||
546 | * | regs->rsp | | ||
547 | * | regs->eflags | | ||
548 | * | regs->cs | | ||
549 | * | regs->ip | | ||
550 | * +----------------------------------------------------+ | ||
551 | * | regs->orig_ax = ~(interrupt number) | | ||
552 | * +----------------------------------------------------+ | ||
553 | * | return address | | ||
554 | * +----------------------------------------------------+ | ||
528 | */ | 555 | */ |
529 | 556 | ENTRY(interrupt_entry) | |
530 | /* 0(%rsp): ~(interrupt number) */ | 557 | UNWIND_HINT_FUNC |
531 | .macro interrupt func | 558 | ASM_CLAC |
532 | cld | 559 | cld |
533 | 560 | ||
534 | testb $3, CS-ORIG_RAX(%rsp) | 561 | testb $3, CS-ORIG_RAX+8(%rsp) |
535 | jz 1f | 562 | jz 1f |
536 | SWAPGS | 563 | SWAPGS |
537 | call switch_to_thread_stack | 564 | |
565 | /* | ||
566 | * Switch to the thread stack. The IRET frame and orig_ax are | ||
567 | * on the stack, as well as the return address. RDI..R12 are | ||
568 | * not (yet) on the stack and space has not (yet) been | ||
569 | * allocated for them. | ||
570 | */ | ||
571 | pushq %rdi | ||
572 | |||
573 | /* Need to switch before accessing the thread stack. */ | ||
574 | SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi | ||
575 | movq %rsp, %rdi | ||
576 | movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp | ||
577 | |||
578 | /* | ||
579 | * We have RDI, return address, and orig_ax on the stack on | ||
580 | * top of the IRET frame. That means offset=24 | ||
581 | */ | ||
582 | UNWIND_HINT_IRET_REGS base=%rdi offset=24 | ||
583 | |||
584 | pushq 7*8(%rdi) /* regs->ss */ | ||
585 | pushq 6*8(%rdi) /* regs->rsp */ | ||
586 | pushq 5*8(%rdi) /* regs->eflags */ | ||
587 | pushq 4*8(%rdi) /* regs->cs */ | ||
588 | pushq 3*8(%rdi) /* regs->ip */ | ||
589 | pushq 2*8(%rdi) /* regs->orig_ax */ | ||
590 | pushq 8(%rdi) /* return address */ | ||
591 | UNWIND_HINT_FUNC | ||
592 | |||
593 | movq (%rdi), %rdi | ||
538 | 1: | 594 | 1: |
539 | 595 | ||
540 | PUSH_AND_CLEAR_REGS | 596 | PUSH_AND_CLEAR_REGS save_ret=1 |
541 | ENCODE_FRAME_POINTER | 597 | ENCODE_FRAME_POINTER 8 |
542 | 598 | ||
543 | testb $3, CS(%rsp) | 599 | testb $3, CS+8(%rsp) |
544 | jz 1f | 600 | jz 1f |
545 | 601 | ||
546 | /* | 602 | /* |
@@ -548,7 +604,7 @@ END(irq_entries_start) | |||
548 | * | 604 | * |
549 | * We need to tell lockdep that IRQs are off. We can't do this until | 605 | * We need to tell lockdep that IRQs are off. We can't do this until |
550 | * we fix gsbase, and we should do it before enter_from_user_mode | 606 | * we fix gsbase, and we should do it before enter_from_user_mode |
551 | * (which can take locks). Since TRACE_IRQS_OFF idempotent, | 607 | * (which can take locks). Since TRACE_IRQS_OFF is idempotent, |
552 | * the simplest way to handle it is to just call it twice if | 608 | * the simplest way to handle it is to just call it twice if |
553 | * we enter from user mode. There's no reason to optimize this since | 609 | * we enter from user mode. There's no reason to optimize this since |
554 | * TRACE_IRQS_OFF is a no-op if lockdep is off. | 610 | * TRACE_IRQS_OFF is a no-op if lockdep is off. |
@@ -558,12 +614,15 @@ END(irq_entries_start) | |||
558 | CALL_enter_from_user_mode | 614 | CALL_enter_from_user_mode |
559 | 615 | ||
560 | 1: | 616 | 1: |
561 | ENTER_IRQ_STACK old_rsp=%rdi | 617 | ENTER_IRQ_STACK old_rsp=%rdi save_ret=1 |
562 | /* We entered an interrupt context - irqs are off: */ | 618 | /* We entered an interrupt context - irqs are off: */ |
563 | TRACE_IRQS_OFF | 619 | TRACE_IRQS_OFF |
564 | 620 | ||
565 | call \func /* rdi points to pt_regs */ | 621 | ret |
566 | .endm | 622 | END(interrupt_entry) |
623 | |||
624 | |||
625 | /* Interrupt entry/exit. */ | ||
567 | 626 | ||
568 | /* | 627 | /* |
569 | * The interrupt stubs push (~vector+0x80) onto the stack and | 628 | * The interrupt stubs push (~vector+0x80) onto the stack and |
@@ -571,9 +630,10 @@ END(irq_entries_start) | |||
571 | */ | 630 | */ |
572 | .p2align CONFIG_X86_L1_CACHE_SHIFT | 631 | .p2align CONFIG_X86_L1_CACHE_SHIFT |
573 | common_interrupt: | 632 | common_interrupt: |
574 | ASM_CLAC | ||
575 | addq $-0x80, (%rsp) /* Adjust vector to [-256, -1] range */ | 633 | addq $-0x80, (%rsp) /* Adjust vector to [-256, -1] range */ |
576 | interrupt do_IRQ | 634 | call interrupt_entry |
635 | UNWIND_HINT_REGS indirect=1 | ||
636 | call do_IRQ /* rdi points to pt_regs */ | ||
577 | /* 0(%rsp): old RSP */ | 637 | /* 0(%rsp): old RSP */ |
578 | ret_from_intr: | 638 | ret_from_intr: |
579 | DISABLE_INTERRUPTS(CLBR_ANY) | 639 | DISABLE_INTERRUPTS(CLBR_ANY) |
@@ -766,10 +826,11 @@ END(common_interrupt) | |||
766 | .macro apicinterrupt3 num sym do_sym | 826 | .macro apicinterrupt3 num sym do_sym |
767 | ENTRY(\sym) | 827 | ENTRY(\sym) |
768 | UNWIND_HINT_IRET_REGS | 828 | UNWIND_HINT_IRET_REGS |
769 | ASM_CLAC | ||
770 | pushq $~(\num) | 829 | pushq $~(\num) |
771 | .Lcommon_\sym: | 830 | .Lcommon_\sym: |
772 | interrupt \do_sym | 831 | call interrupt_entry |
832 | UNWIND_HINT_REGS indirect=1 | ||
833 | call \do_sym /* rdi points to pt_regs */ | ||
773 | jmp ret_from_intr | 834 | jmp ret_from_intr |
774 | END(\sym) | 835 | END(\sym) |
775 | .endm | 836 | .endm |
@@ -832,34 +893,6 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt | |||
832 | */ | 893 | */ |
833 | #define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8) | 894 | #define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8) |
834 | 895 | ||
835 | /* | ||
836 | * Switch to the thread stack. This is called with the IRET frame and | ||
837 | * orig_ax on the stack. (That is, RDI..R12 are not on the stack and | ||
838 | * space has not been allocated for them.) | ||
839 | */ | ||
840 | ENTRY(switch_to_thread_stack) | ||
841 | UNWIND_HINT_FUNC | ||
842 | |||
843 | pushq %rdi | ||
844 | /* Need to switch before accessing the thread stack. */ | ||
845 | SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi | ||
846 | movq %rsp, %rdi | ||
847 | movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp | ||
848 | UNWIND_HINT sp_offset=16 sp_reg=ORC_REG_DI | ||
849 | |||
850 | pushq 7*8(%rdi) /* regs->ss */ | ||
851 | pushq 6*8(%rdi) /* regs->rsp */ | ||
852 | pushq 5*8(%rdi) /* regs->eflags */ | ||
853 | pushq 4*8(%rdi) /* regs->cs */ | ||
854 | pushq 3*8(%rdi) /* regs->ip */ | ||
855 | pushq 2*8(%rdi) /* regs->orig_ax */ | ||
856 | pushq 8(%rdi) /* return address */ | ||
857 | UNWIND_HINT_FUNC | ||
858 | |||
859 | movq (%rdi), %rdi | ||
860 | ret | ||
861 | END(switch_to_thread_stack) | ||
862 | |||
863 | .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 | 896 | .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 |
864 | ENTRY(\sym) | 897 | ENTRY(\sym) |
865 | UNWIND_HINT_IRET_REGS offset=\has_error_code*8 | 898 | UNWIND_HINT_IRET_REGS offset=\has_error_code*8 |
@@ -875,12 +908,8 @@ ENTRY(\sym) | |||
875 | pushq $-1 /* ORIG_RAX: no syscall to restart */ | 908 | pushq $-1 /* ORIG_RAX: no syscall to restart */ |
876 | .endif | 909 | .endif |
877 | 910 | ||
878 | /* Save all registers in pt_regs */ | ||
879 | PUSH_AND_CLEAR_REGS | ||
880 | ENCODE_FRAME_POINTER | ||
881 | |||
882 | .if \paranoid < 2 | 911 | .if \paranoid < 2 |
883 | testb $3, CS(%rsp) /* If coming from userspace, switch stacks */ | 912 | testb $3, CS-ORIG_RAX(%rsp) /* If coming from userspace, switch stacks */ |
884 | jnz .Lfrom_usermode_switch_stack_\@ | 913 | jnz .Lfrom_usermode_switch_stack_\@ |
885 | .endif | 914 | .endif |
886 | 915 | ||
@@ -1130,13 +1159,15 @@ idtentry machine_check do_mce has_error_code=0 paranoid=1 | |||
1130 | #endif | 1159 | #endif |
1131 | 1160 | ||
1132 | /* | 1161 | /* |
1133 | * Switch gs if needed. | 1162 | * Save all registers in pt_regs, and switch gs if needed. |
1134 | * Use slow, but surefire "are we in kernel?" check. | 1163 | * Use slow, but surefire "are we in kernel?" check. |
1135 | * Return: ebx=0: need swapgs on exit, ebx=1: otherwise | 1164 | * Return: ebx=0: need swapgs on exit, ebx=1: otherwise |
1136 | */ | 1165 | */ |
1137 | ENTRY(paranoid_entry) | 1166 | ENTRY(paranoid_entry) |
1138 | UNWIND_HINT_FUNC | 1167 | UNWIND_HINT_FUNC |
1139 | cld | 1168 | cld |
1169 | PUSH_AND_CLEAR_REGS save_ret=1 | ||
1170 | ENCODE_FRAME_POINTER 8 | ||
1140 | movl $1, %ebx | 1171 | movl $1, %ebx |
1141 | movl $MSR_GS_BASE, %ecx | 1172 | movl $MSR_GS_BASE, %ecx |
1142 | rdmsr | 1173 | rdmsr |
@@ -1181,12 +1212,14 @@ ENTRY(paranoid_exit) | |||
1181 | END(paranoid_exit) | 1212 | END(paranoid_exit) |
1182 | 1213 | ||
1183 | /* | 1214 | /* |
1184 | * Switch gs if needed. | 1215 | * Save all registers in pt_regs, and switch GS if needed. |
1185 | * Return: EBX=0: came from user mode; EBX=1: otherwise | 1216 | * Return: EBX=0: came from user mode; EBX=1: otherwise |
1186 | */ | 1217 | */ |
1187 | ENTRY(error_entry) | 1218 | ENTRY(error_entry) |
1188 | UNWIND_HINT_REGS offset=8 | 1219 | UNWIND_HINT_FUNC |
1189 | cld | 1220 | cld |
1221 | PUSH_AND_CLEAR_REGS save_ret=1 | ||
1222 | ENCODE_FRAME_POINTER 8 | ||
1190 | testb $3, CS+8(%rsp) | 1223 | testb $3, CS+8(%rsp) |
1191 | jz .Lerror_kernelspace | 1224 | jz .Lerror_kernelspace |
1192 | 1225 | ||
@@ -1577,8 +1610,6 @@ end_repeat_nmi: | |||
1577 | * frame to point back to repeat_nmi. | 1610 | * frame to point back to repeat_nmi. |
1578 | */ | 1611 | */ |
1579 | pushq $-1 /* ORIG_RAX: no syscall to restart */ | 1612 | pushq $-1 /* ORIG_RAX: no syscall to restart */ |
1580 | PUSH_AND_CLEAR_REGS | ||
1581 | ENCODE_FRAME_POINTER | ||
1582 | 1613 | ||
1583 | /* | 1614 | /* |
1584 | * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit | 1615 | * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit |
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index fd65e016e413..08425c42f8b7 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S | |||
@@ -85,25 +85,25 @@ ENTRY(entry_SYSENTER_compat) | |||
85 | pushq %rcx /* pt_regs->cx */ | 85 | pushq %rcx /* pt_regs->cx */ |
86 | pushq $-ENOSYS /* pt_regs->ax */ | 86 | pushq $-ENOSYS /* pt_regs->ax */ |
87 | pushq $0 /* pt_regs->r8 = 0 */ | 87 | pushq $0 /* pt_regs->r8 = 0 */ |
88 | xorq %r8, %r8 /* nospec r8 */ | 88 | xorl %r8d, %r8d /* nospec r8 */ |
89 | pushq $0 /* pt_regs->r9 = 0 */ | 89 | pushq $0 /* pt_regs->r9 = 0 */ |
90 | xorq %r9, %r9 /* nospec r9 */ | 90 | xorl %r9d, %r9d /* nospec r9 */ |
91 | pushq $0 /* pt_regs->r10 = 0 */ | 91 | pushq $0 /* pt_regs->r10 = 0 */ |
92 | xorq %r10, %r10 /* nospec r10 */ | 92 | xorl %r10d, %r10d /* nospec r10 */ |
93 | pushq $0 /* pt_regs->r11 = 0 */ | 93 | pushq $0 /* pt_regs->r11 = 0 */ |
94 | xorq %r11, %r11 /* nospec r11 */ | 94 | xorl %r11d, %r11d /* nospec r11 */ |
95 | pushq %rbx /* pt_regs->rbx */ | 95 | pushq %rbx /* pt_regs->rbx */ |
96 | xorl %ebx, %ebx /* nospec rbx */ | 96 | xorl %ebx, %ebx /* nospec rbx */ |
97 | pushq %rbp /* pt_regs->rbp (will be overwritten) */ | 97 | pushq %rbp /* pt_regs->rbp (will be overwritten) */ |
98 | xorl %ebp, %ebp /* nospec rbp */ | 98 | xorl %ebp, %ebp /* nospec rbp */ |
99 | pushq $0 /* pt_regs->r12 = 0 */ | 99 | pushq $0 /* pt_regs->r12 = 0 */ |
100 | xorq %r12, %r12 /* nospec r12 */ | 100 | xorl %r12d, %r12d /* nospec r12 */ |
101 | pushq $0 /* pt_regs->r13 = 0 */ | 101 | pushq $0 /* pt_regs->r13 = 0 */ |
102 | xorq %r13, %r13 /* nospec r13 */ | 102 | xorl %r13d, %r13d /* nospec r13 */ |
103 | pushq $0 /* pt_regs->r14 = 0 */ | 103 | pushq $0 /* pt_regs->r14 = 0 */ |
104 | xorq %r14, %r14 /* nospec r14 */ | 104 | xorl %r14d, %r14d /* nospec r14 */ |
105 | pushq $0 /* pt_regs->r15 = 0 */ | 105 | pushq $0 /* pt_regs->r15 = 0 */ |
106 | xorq %r15, %r15 /* nospec r15 */ | 106 | xorl %r15d, %r15d /* nospec r15 */ |
107 | cld | 107 | cld |
108 | 108 | ||
109 | /* | 109 | /* |
@@ -224,25 +224,25 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe) | |||
224 | pushq %rbp /* pt_regs->cx (stashed in bp) */ | 224 | pushq %rbp /* pt_regs->cx (stashed in bp) */ |
225 | pushq $-ENOSYS /* pt_regs->ax */ | 225 | pushq $-ENOSYS /* pt_regs->ax */ |
226 | pushq $0 /* pt_regs->r8 = 0 */ | 226 | pushq $0 /* pt_regs->r8 = 0 */ |
227 | xorq %r8, %r8 /* nospec r8 */ | 227 | xorl %r8d, %r8d /* nospec r8 */ |
228 | pushq $0 /* pt_regs->r9 = 0 */ | 228 | pushq $0 /* pt_regs->r9 = 0 */ |
229 | xorq %r9, %r9 /* nospec r9 */ | 229 | xorl %r9d, %r9d /* nospec r9 */ |
230 | pushq $0 /* pt_regs->r10 = 0 */ | 230 | pushq $0 /* pt_regs->r10 = 0 */ |
231 | xorq %r10, %r10 /* nospec r10 */ | 231 | xorl %r10d, %r10d /* nospec r10 */ |
232 | pushq $0 /* pt_regs->r11 = 0 */ | 232 | pushq $0 /* pt_regs->r11 = 0 */ |
233 | xorq %r11, %r11 /* nospec r11 */ | 233 | xorl %r11d, %r11d /* nospec r11 */ |
234 | pushq %rbx /* pt_regs->rbx */ | 234 | pushq %rbx /* pt_regs->rbx */ |
235 | xorl %ebx, %ebx /* nospec rbx */ | 235 | xorl %ebx, %ebx /* nospec rbx */ |
236 | pushq %rbp /* pt_regs->rbp (will be overwritten) */ | 236 | pushq %rbp /* pt_regs->rbp (will be overwritten) */ |
237 | xorl %ebp, %ebp /* nospec rbp */ | 237 | xorl %ebp, %ebp /* nospec rbp */ |
238 | pushq $0 /* pt_regs->r12 = 0 */ | 238 | pushq $0 /* pt_regs->r12 = 0 */ |
239 | xorq %r12, %r12 /* nospec r12 */ | 239 | xorl %r12d, %r12d /* nospec r12 */ |
240 | pushq $0 /* pt_regs->r13 = 0 */ | 240 | pushq $0 /* pt_regs->r13 = 0 */ |
241 | xorq %r13, %r13 /* nospec r13 */ | 241 | xorl %r13d, %r13d /* nospec r13 */ |
242 | pushq $0 /* pt_regs->r14 = 0 */ | 242 | pushq $0 /* pt_regs->r14 = 0 */ |
243 | xorq %r14, %r14 /* nospec r14 */ | 243 | xorl %r14d, %r14d /* nospec r14 */ |
244 | pushq $0 /* pt_regs->r15 = 0 */ | 244 | pushq $0 /* pt_regs->r15 = 0 */ |
245 | xorq %r15, %r15 /* nospec r15 */ | 245 | xorl %r15d, %r15d /* nospec r15 */ |
246 | 246 | ||
247 | /* | 247 | /* |
248 | * User mode is traced as though IRQs are on, and SYSENTER | 248 | * User mode is traced as though IRQs are on, and SYSENTER |
@@ -298,9 +298,9 @@ sysret32_from_system_call: | |||
298 | */ | 298 | */ |
299 | SWITCH_TO_USER_CR3_NOSTACK scratch_reg=%r8 scratch_reg2=%r9 | 299 | SWITCH_TO_USER_CR3_NOSTACK scratch_reg=%r8 scratch_reg2=%r9 |
300 | 300 | ||
301 | xorq %r8, %r8 | 301 | xorl %r8d, %r8d |
302 | xorq %r9, %r9 | 302 | xorl %r9d, %r9d |
303 | xorq %r10, %r10 | 303 | xorl %r10d, %r10d |
304 | swapgs | 304 | swapgs |
305 | sysretl | 305 | sysretl |
306 | END(entry_SYSCALL_compat) | 306 | END(entry_SYSCALL_compat) |
@@ -347,36 +347,47 @@ ENTRY(entry_INT80_compat) | |||
347 | */ | 347 | */ |
348 | movl %eax, %eax | 348 | movl %eax, %eax |
349 | 349 | ||
350 | /* switch to thread stack expects orig_ax and rdi to be pushed */ | ||
350 | pushq %rax /* pt_regs->orig_ax */ | 351 | pushq %rax /* pt_regs->orig_ax */ |
352 | pushq %rdi /* pt_regs->di */ | ||
351 | 353 | ||
352 | /* switch to thread stack expects orig_ax to be pushed */ | 354 | /* Need to switch before accessing the thread stack. */ |
353 | call switch_to_thread_stack | 355 | SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi |
356 | movq %rsp, %rdi | ||
357 | movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp | ||
354 | 358 | ||
355 | pushq %rdi /* pt_regs->di */ | 359 | pushq 6*8(%rdi) /* regs->ss */ |
360 | pushq 5*8(%rdi) /* regs->rsp */ | ||
361 | pushq 4*8(%rdi) /* regs->eflags */ | ||
362 | pushq 3*8(%rdi) /* regs->cs */ | ||
363 | pushq 2*8(%rdi) /* regs->ip */ | ||
364 | pushq 1*8(%rdi) /* regs->orig_ax */ | ||
365 | |||
366 | pushq (%rdi) /* pt_regs->di */ | ||
356 | pushq %rsi /* pt_regs->si */ | 367 | pushq %rsi /* pt_regs->si */ |
357 | pushq %rdx /* pt_regs->dx */ | 368 | pushq %rdx /* pt_regs->dx */ |
358 | pushq %rcx /* pt_regs->cx */ | 369 | pushq %rcx /* pt_regs->cx */ |
359 | pushq $-ENOSYS /* pt_regs->ax */ | 370 | pushq $-ENOSYS /* pt_regs->ax */ |
360 | pushq $0 /* pt_regs->r8 = 0 */ | 371 | pushq $0 /* pt_regs->r8 = 0 */ |
361 | xorq %r8, %r8 /* nospec r8 */ | 372 | xorl %r8d, %r8d /* nospec r8 */ |
362 | pushq $0 /* pt_regs->r9 = 0 */ | 373 | pushq $0 /* pt_regs->r9 = 0 */ |
363 | xorq %r9, %r9 /* nospec r9 */ | 374 | xorl %r9d, %r9d /* nospec r9 */ |
364 | pushq $0 /* pt_regs->r10 = 0 */ | 375 | pushq $0 /* pt_regs->r10 = 0 */ |
365 | xorq %r10, %r10 /* nospec r10 */ | 376 | xorl %r10d, %r10d /* nospec r10 */ |
366 | pushq $0 /* pt_regs->r11 = 0 */ | 377 | pushq $0 /* pt_regs->r11 = 0 */ |
367 | xorq %r11, %r11 /* nospec r11 */ | 378 | xorl %r11d, %r11d /* nospec r11 */ |
368 | pushq %rbx /* pt_regs->rbx */ | 379 | pushq %rbx /* pt_regs->rbx */ |
369 | xorl %ebx, %ebx /* nospec rbx */ | 380 | xorl %ebx, %ebx /* nospec rbx */ |
370 | pushq %rbp /* pt_regs->rbp */ | 381 | pushq %rbp /* pt_regs->rbp */ |
371 | xorl %ebp, %ebp /* nospec rbp */ | 382 | xorl %ebp, %ebp /* nospec rbp */ |
372 | pushq %r12 /* pt_regs->r12 */ | 383 | pushq %r12 /* pt_regs->r12 */ |
373 | xorq %r12, %r12 /* nospec r12 */ | 384 | xorl %r12d, %r12d /* nospec r12 */ |
374 | pushq %r13 /* pt_regs->r13 */ | 385 | pushq %r13 /* pt_regs->r13 */ |
375 | xorq %r13, %r13 /* nospec r13 */ | 386 | xorl %r13d, %r13d /* nospec r13 */ |
376 | pushq %r14 /* pt_regs->r14 */ | 387 | pushq %r14 /* pt_regs->r14 */ |
377 | xorq %r14, %r14 /* nospec r14 */ | 388 | xorl %r14d, %r14d /* nospec r14 */ |
378 | pushq %r15 /* pt_regs->r15 */ | 389 | pushq %r15 /* pt_regs->r15 */ |
379 | xorq %r15, %r15 /* nospec r15 */ | 390 | xorl %r15d, %r15d /* nospec r15 */ |
380 | cld | 391 | cld |
381 | 392 | ||
382 | /* | 393 | /* |
@@ -393,15 +404,3 @@ ENTRY(entry_INT80_compat) | |||
393 | TRACE_IRQS_ON | 404 | TRACE_IRQS_ON |
394 | jmp swapgs_restore_regs_and_return_to_usermode | 405 | jmp swapgs_restore_regs_and_return_to_usermode |
395 | END(entry_INT80_compat) | 406 | END(entry_INT80_compat) |
396 | |||
397 | ENTRY(stub32_clone) | ||
398 | /* | ||
399 | * The 32-bit clone ABI is: clone(..., int tls_val, int *child_tidptr). | ||
400 | * The 64-bit clone ABI is: clone(..., int *child_tidptr, int tls_val). | ||
401 | * | ||
402 | * The native 64-bit kernel's sys_clone() implements the latter, | ||
403 | * so we need to swap arguments here before calling it: | ||
404 | */ | ||
405 | xchg %r8, %rcx | ||
406 | jmp sys_clone | ||
407 | ENDPROC(stub32_clone) | ||
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl index 448ac2161112..2a5e99cff859 100644 --- a/arch/x86/entry/syscalls/syscall_32.tbl +++ b/arch/x86/entry/syscalls/syscall_32.tbl | |||
@@ -8,12 +8,12 @@ | |||
8 | # | 8 | # |
9 | 0 i386 restart_syscall sys_restart_syscall | 9 | 0 i386 restart_syscall sys_restart_syscall |
10 | 1 i386 exit sys_exit | 10 | 1 i386 exit sys_exit |
11 | 2 i386 fork sys_fork sys_fork | 11 | 2 i386 fork sys_fork |
12 | 3 i386 read sys_read | 12 | 3 i386 read sys_read |
13 | 4 i386 write sys_write | 13 | 4 i386 write sys_write |
14 | 5 i386 open sys_open compat_sys_open | 14 | 5 i386 open sys_open compat_sys_open |
15 | 6 i386 close sys_close | 15 | 6 i386 close sys_close |
16 | 7 i386 waitpid sys_waitpid sys32_waitpid | 16 | 7 i386 waitpid sys_waitpid compat_sys_x86_waitpid |
17 | 8 i386 creat sys_creat | 17 | 8 i386 creat sys_creat |
18 | 9 i386 link sys_link | 18 | 9 i386 link sys_link |
19 | 10 i386 unlink sys_unlink | 19 | 10 i386 unlink sys_unlink |
@@ -78,7 +78,7 @@ | |||
78 | 69 i386 ssetmask sys_ssetmask | 78 | 69 i386 ssetmask sys_ssetmask |
79 | 70 i386 setreuid sys_setreuid16 | 79 | 70 i386 setreuid sys_setreuid16 |
80 | 71 i386 setregid sys_setregid16 | 80 | 71 i386 setregid sys_setregid16 |
81 | 72 i386 sigsuspend sys_sigsuspend sys_sigsuspend | 81 | 72 i386 sigsuspend sys_sigsuspend |
82 | 73 i386 sigpending sys_sigpending compat_sys_sigpending | 82 | 73 i386 sigpending sys_sigpending compat_sys_sigpending |
83 | 74 i386 sethostname sys_sethostname | 83 | 74 i386 sethostname sys_sethostname |
84 | 75 i386 setrlimit sys_setrlimit compat_sys_setrlimit | 84 | 75 i386 setrlimit sys_setrlimit compat_sys_setrlimit |
@@ -96,7 +96,7 @@ | |||
96 | 87 i386 swapon sys_swapon | 96 | 87 i386 swapon sys_swapon |
97 | 88 i386 reboot sys_reboot | 97 | 88 i386 reboot sys_reboot |
98 | 89 i386 readdir sys_old_readdir compat_sys_old_readdir | 98 | 89 i386 readdir sys_old_readdir compat_sys_old_readdir |
99 | 90 i386 mmap sys_old_mmap sys32_mmap | 99 | 90 i386 mmap sys_old_mmap compat_sys_x86_mmap |
100 | 91 i386 munmap sys_munmap | 100 | 91 i386 munmap sys_munmap |
101 | 92 i386 truncate sys_truncate compat_sys_truncate | 101 | 92 i386 truncate sys_truncate compat_sys_truncate |
102 | 93 i386 ftruncate sys_ftruncate compat_sys_ftruncate | 102 | 93 i386 ftruncate sys_ftruncate compat_sys_ftruncate |
@@ -126,7 +126,7 @@ | |||
126 | 117 i386 ipc sys_ipc compat_sys_ipc | 126 | 117 i386 ipc sys_ipc compat_sys_ipc |
127 | 118 i386 fsync sys_fsync | 127 | 118 i386 fsync sys_fsync |
128 | 119 i386 sigreturn sys_sigreturn sys32_sigreturn | 128 | 119 i386 sigreturn sys_sigreturn sys32_sigreturn |
129 | 120 i386 clone sys_clone stub32_clone | 129 | 120 i386 clone sys_clone compat_sys_x86_clone |
130 | 121 i386 setdomainname sys_setdomainname | 130 | 121 i386 setdomainname sys_setdomainname |
131 | 122 i386 uname sys_newuname | 131 | 122 i386 uname sys_newuname |
132 | 123 i386 modify_ldt sys_modify_ldt | 132 | 123 i386 modify_ldt sys_modify_ldt |
@@ -186,8 +186,8 @@ | |||
186 | 177 i386 rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait | 186 | 177 i386 rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait |
187 | 178 i386 rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo | 187 | 178 i386 rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo |
188 | 179 i386 rt_sigsuspend sys_rt_sigsuspend | 188 | 179 i386 rt_sigsuspend sys_rt_sigsuspend |
189 | 180 i386 pread64 sys_pread64 sys32_pread | 189 | 180 i386 pread64 sys_pread64 compat_sys_x86_pread |
190 | 181 i386 pwrite64 sys_pwrite64 sys32_pwrite | 190 | 181 i386 pwrite64 sys_pwrite64 compat_sys_x86_pwrite |
191 | 182 i386 chown sys_chown16 | 191 | 182 i386 chown sys_chown16 |
192 | 183 i386 getcwd sys_getcwd | 192 | 183 i386 getcwd sys_getcwd |
193 | 184 i386 capget sys_capget | 193 | 184 i386 capget sys_capget |
@@ -196,14 +196,14 @@ | |||
196 | 187 i386 sendfile sys_sendfile compat_sys_sendfile | 196 | 187 i386 sendfile sys_sendfile compat_sys_sendfile |
197 | 188 i386 getpmsg | 197 | 188 i386 getpmsg |
198 | 189 i386 putpmsg | 198 | 189 i386 putpmsg |
199 | 190 i386 vfork sys_vfork sys_vfork | 199 | 190 i386 vfork sys_vfork |
200 | 191 i386 ugetrlimit sys_getrlimit compat_sys_getrlimit | 200 | 191 i386 ugetrlimit sys_getrlimit compat_sys_getrlimit |
201 | 192 i386 mmap2 sys_mmap_pgoff | 201 | 192 i386 mmap2 sys_mmap_pgoff |
202 | 193 i386 truncate64 sys_truncate64 sys32_truncate64 | 202 | 193 i386 truncate64 sys_truncate64 compat_sys_x86_truncate64 |
203 | 194 i386 ftruncate64 sys_ftruncate64 sys32_ftruncate64 | 203 | 194 i386 ftruncate64 sys_ftruncate64 compat_sys_x86_ftruncate64 |
204 | 195 i386 stat64 sys_stat64 sys32_stat64 | 204 | 195 i386 stat64 sys_stat64 compat_sys_x86_stat64 |
205 | 196 i386 lstat64 sys_lstat64 sys32_lstat64 | 205 | 196 i386 lstat64 sys_lstat64 compat_sys_x86_lstat64 |
206 | 197 i386 fstat64 sys_fstat64 sys32_fstat64 | 206 | 197 i386 fstat64 sys_fstat64 compat_sys_x86_fstat64 |
207 | 198 i386 lchown32 sys_lchown | 207 | 198 i386 lchown32 sys_lchown |
208 | 199 i386 getuid32 sys_getuid | 208 | 199 i386 getuid32 sys_getuid |
209 | 200 i386 getgid32 sys_getgid | 209 | 200 i386 getgid32 sys_getgid |
@@ -231,7 +231,7 @@ | |||
231 | # 222 is unused | 231 | # 222 is unused |
232 | # 223 is unused | 232 | # 223 is unused |
233 | 224 i386 gettid sys_gettid | 233 | 224 i386 gettid sys_gettid |
234 | 225 i386 readahead sys_readahead sys32_readahead | 234 | 225 i386 readahead sys_readahead compat_sys_x86_readahead |
235 | 226 i386 setxattr sys_setxattr | 235 | 226 i386 setxattr sys_setxattr |
236 | 227 i386 lsetxattr sys_lsetxattr | 236 | 227 i386 lsetxattr sys_lsetxattr |
237 | 228 i386 fsetxattr sys_fsetxattr | 237 | 228 i386 fsetxattr sys_fsetxattr |
@@ -256,7 +256,7 @@ | |||
256 | 247 i386 io_getevents sys_io_getevents compat_sys_io_getevents | 256 | 247 i386 io_getevents sys_io_getevents compat_sys_io_getevents |
257 | 248 i386 io_submit sys_io_submit compat_sys_io_submit | 257 | 248 i386 io_submit sys_io_submit compat_sys_io_submit |
258 | 249 i386 io_cancel sys_io_cancel | 258 | 249 i386 io_cancel sys_io_cancel |
259 | 250 i386 fadvise64 sys_fadvise64 sys32_fadvise64 | 259 | 250 i386 fadvise64 sys_fadvise64 compat_sys_x86_fadvise64 |
260 | # 251 is available for reuse (was briefly sys_set_zone_reclaim) | 260 | # 251 is available for reuse (was briefly sys_set_zone_reclaim) |
261 | 252 i386 exit_group sys_exit_group | 261 | 252 i386 exit_group sys_exit_group |
262 | 253 i386 lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie | 262 | 253 i386 lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie |
@@ -278,7 +278,7 @@ | |||
278 | 269 i386 fstatfs64 sys_fstatfs64 compat_sys_fstatfs64 | 278 | 269 i386 fstatfs64 sys_fstatfs64 compat_sys_fstatfs64 |
279 | 270 i386 tgkill sys_tgkill | 279 | 270 i386 tgkill sys_tgkill |
280 | 271 i386 utimes sys_utimes compat_sys_utimes | 280 | 271 i386 utimes sys_utimes compat_sys_utimes |
281 | 272 i386 fadvise64_64 sys_fadvise64_64 sys32_fadvise64_64 | 281 | 272 i386 fadvise64_64 sys_fadvise64_64 compat_sys_x86_fadvise64_64 |
282 | 273 i386 vserver | 282 | 273 i386 vserver |
283 | 274 i386 mbind sys_mbind | 283 | 274 i386 mbind sys_mbind |
284 | 275 i386 get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy | 284 | 275 i386 get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy |
@@ -306,7 +306,7 @@ | |||
306 | 297 i386 mknodat sys_mknodat | 306 | 297 i386 mknodat sys_mknodat |
307 | 298 i386 fchownat sys_fchownat | 307 | 298 i386 fchownat sys_fchownat |
308 | 299 i386 futimesat sys_futimesat compat_sys_futimesat | 308 | 299 i386 futimesat sys_futimesat compat_sys_futimesat |
309 | 300 i386 fstatat64 sys_fstatat64 sys32_fstatat | 309 | 300 i386 fstatat64 sys_fstatat64 compat_sys_x86_fstatat |
310 | 301 i386 unlinkat sys_unlinkat | 310 | 301 i386 unlinkat sys_unlinkat |
311 | 302 i386 renameat sys_renameat | 311 | 302 i386 renameat sys_renameat |
312 | 303 i386 linkat sys_linkat | 312 | 303 i386 linkat sys_linkat |
@@ -320,7 +320,7 @@ | |||
320 | 311 i386 set_robust_list sys_set_robust_list compat_sys_set_robust_list | 320 | 311 i386 set_robust_list sys_set_robust_list compat_sys_set_robust_list |
321 | 312 i386 get_robust_list sys_get_robust_list compat_sys_get_robust_list | 321 | 312 i386 get_robust_list sys_get_robust_list compat_sys_get_robust_list |
322 | 313 i386 splice sys_splice | 322 | 313 i386 splice sys_splice |
323 | 314 i386 sync_file_range sys_sync_file_range sys32_sync_file_range | 323 | 314 i386 sync_file_range sys_sync_file_range compat_sys_x86_sync_file_range |
324 | 315 i386 tee sys_tee | 324 | 315 i386 tee sys_tee |
325 | 316 i386 vmsplice sys_vmsplice compat_sys_vmsplice | 325 | 316 i386 vmsplice sys_vmsplice compat_sys_vmsplice |
326 | 317 i386 move_pages sys_move_pages compat_sys_move_pages | 326 | 317 i386 move_pages sys_move_pages compat_sys_move_pages |
@@ -330,7 +330,7 @@ | |||
330 | 321 i386 signalfd sys_signalfd compat_sys_signalfd | 330 | 321 i386 signalfd sys_signalfd compat_sys_signalfd |
331 | 322 i386 timerfd_create sys_timerfd_create | 331 | 322 i386 timerfd_create sys_timerfd_create |
332 | 323 i386 eventfd sys_eventfd | 332 | 323 i386 eventfd sys_eventfd |
333 | 324 i386 fallocate sys_fallocate sys32_fallocate | 333 | 324 i386 fallocate sys_fallocate compat_sys_x86_fallocate |
334 | 325 i386 timerfd_settime sys_timerfd_settime compat_sys_timerfd_settime | 334 | 325 i386 timerfd_settime sys_timerfd_settime compat_sys_timerfd_settime |
335 | 326 i386 timerfd_gettime sys_timerfd_gettime compat_sys_timerfd_gettime | 335 | 326 i386 timerfd_gettime sys_timerfd_gettime compat_sys_timerfd_gettime |
336 | 327 i386 signalfd4 sys_signalfd4 compat_sys_signalfd4 | 336 | 327 i386 signalfd4 sys_signalfd4 compat_sys_signalfd4 |
diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c index 577fa8adb785..8560ef68a9d6 100644 --- a/arch/x86/entry/vsyscall/vsyscall_64.c +++ b/arch/x86/entry/vsyscall/vsyscall_64.c | |||
@@ -42,10 +42,8 @@ | |||
42 | #define CREATE_TRACE_POINTS | 42 | #define CREATE_TRACE_POINTS |
43 | #include "vsyscall_trace.h" | 43 | #include "vsyscall_trace.h" |
44 | 44 | ||
45 | static enum { EMULATE, NATIVE, NONE } vsyscall_mode = | 45 | static enum { EMULATE, NONE } vsyscall_mode = |
46 | #if defined(CONFIG_LEGACY_VSYSCALL_NATIVE) | 46 | #ifdef CONFIG_LEGACY_VSYSCALL_NONE |
47 | NATIVE; | ||
48 | #elif defined(CONFIG_LEGACY_VSYSCALL_NONE) | ||
49 | NONE; | 47 | NONE; |
50 | #else | 48 | #else |
51 | EMULATE; | 49 | EMULATE; |
@@ -56,8 +54,6 @@ static int __init vsyscall_setup(char *str) | |||
56 | if (str) { | 54 | if (str) { |
57 | if (!strcmp("emulate", str)) | 55 | if (!strcmp("emulate", str)) |
58 | vsyscall_mode = EMULATE; | 56 | vsyscall_mode = EMULATE; |
59 | else if (!strcmp("native", str)) | ||
60 | vsyscall_mode = NATIVE; | ||
61 | else if (!strcmp("none", str)) | 57 | else if (!strcmp("none", str)) |
62 | vsyscall_mode = NONE; | 58 | vsyscall_mode = NONE; |
63 | else | 59 | else |
@@ -139,10 +135,6 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address) | |||
139 | 135 | ||
140 | WARN_ON_ONCE(address != regs->ip); | 136 | WARN_ON_ONCE(address != regs->ip); |
141 | 137 | ||
142 | /* This should be unreachable in NATIVE mode. */ | ||
143 | if (WARN_ON(vsyscall_mode == NATIVE)) | ||
144 | return false; | ||
145 | |||
146 | if (vsyscall_mode == NONE) { | 138 | if (vsyscall_mode == NONE) { |
147 | warn_bad_vsyscall(KERN_INFO, regs, | 139 | warn_bad_vsyscall(KERN_INFO, regs, |
148 | "vsyscall attempted with vsyscall=none"); | 140 | "vsyscall attempted with vsyscall=none"); |
@@ -370,9 +362,7 @@ void __init map_vsyscall(void) | |||
370 | 362 | ||
371 | if (vsyscall_mode != NONE) { | 363 | if (vsyscall_mode != NONE) { |
372 | __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, | 364 | __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, |
373 | vsyscall_mode == NATIVE | 365 | PAGE_KERNEL_VVAR); |
374 | ? PAGE_KERNEL_VSYSCALL | ||
375 | : PAGE_KERNEL_VVAR); | ||
376 | set_vsyscall_pgtable_user_bits(swapper_pg_dir); | 366 | set_vsyscall_pgtable_user_bits(swapper_pg_dir); |
377 | } | 367 | } |
378 | 368 | ||
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index 6d8044ab1060..22ec65bc033a 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c | |||
@@ -3606,7 +3606,7 @@ static struct intel_uncore_type skx_uncore_imc = { | |||
3606 | }; | 3606 | }; |
3607 | 3607 | ||
3608 | static struct attribute *skx_upi_uncore_formats_attr[] = { | 3608 | static struct attribute *skx_upi_uncore_formats_attr[] = { |
3609 | &format_attr_event_ext.attr, | 3609 | &format_attr_event.attr, |
3610 | &format_attr_umask_ext.attr, | 3610 | &format_attr_umask_ext.attr, |
3611 | &format_attr_edge.attr, | 3611 | &format_attr_edge.attr, |
3612 | &format_attr_inv.attr, | 3612 | &format_attr_inv.attr, |
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c index 96cd33bbfc85..6512498bbef6 100644 --- a/arch/x86/ia32/sys_ia32.c +++ b/arch/x86/ia32/sys_ia32.c | |||
@@ -51,15 +51,14 @@ | |||
51 | #define AA(__x) ((unsigned long)(__x)) | 51 | #define AA(__x) ((unsigned long)(__x)) |
52 | 52 | ||
53 | 53 | ||
54 | asmlinkage long sys32_truncate64(const char __user *filename, | 54 | COMPAT_SYSCALL_DEFINE3(x86_truncate64, const char __user *, filename, |
55 | unsigned long offset_low, | 55 | unsigned long, offset_low, unsigned long, offset_high) |
56 | unsigned long offset_high) | ||
57 | { | 56 | { |
58 | return sys_truncate(filename, ((loff_t) offset_high << 32) | offset_low); | 57 | return sys_truncate(filename, ((loff_t) offset_high << 32) | offset_low); |
59 | } | 58 | } |
60 | 59 | ||
61 | asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low, | 60 | COMPAT_SYSCALL_DEFINE3(x86_ftruncate64, unsigned int, fd, |
62 | unsigned long offset_high) | 61 | unsigned long, offset_low, unsigned long, offset_high) |
63 | { | 62 | { |
64 | return sys_ftruncate(fd, ((loff_t) offset_high << 32) | offset_low); | 63 | return sys_ftruncate(fd, ((loff_t) offset_high << 32) | offset_low); |
65 | } | 64 | } |
@@ -96,8 +95,8 @@ static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat) | |||
96 | return 0; | 95 | return 0; |
97 | } | 96 | } |
98 | 97 | ||
99 | asmlinkage long sys32_stat64(const char __user *filename, | 98 | COMPAT_SYSCALL_DEFINE2(x86_stat64, const char __user *, filename, |
100 | struct stat64 __user *statbuf) | 99 | struct stat64 __user *, statbuf) |
101 | { | 100 | { |
102 | struct kstat stat; | 101 | struct kstat stat; |
103 | int ret = vfs_stat(filename, &stat); | 102 | int ret = vfs_stat(filename, &stat); |
@@ -107,8 +106,8 @@ asmlinkage long sys32_stat64(const char __user *filename, | |||
107 | return ret; | 106 | return ret; |
108 | } | 107 | } |
109 | 108 | ||
110 | asmlinkage long sys32_lstat64(const char __user *filename, | 109 | COMPAT_SYSCALL_DEFINE2(x86_lstat64, const char __user *, filename, |
111 | struct stat64 __user *statbuf) | 110 | struct stat64 __user *, statbuf) |
112 | { | 111 | { |
113 | struct kstat stat; | 112 | struct kstat stat; |
114 | int ret = vfs_lstat(filename, &stat); | 113 | int ret = vfs_lstat(filename, &stat); |
@@ -117,7 +116,8 @@ asmlinkage long sys32_lstat64(const char __user *filename, | |||
117 | return ret; | 116 | return ret; |
118 | } | 117 | } |
119 | 118 | ||
120 | asmlinkage long sys32_fstat64(unsigned int fd, struct stat64 __user *statbuf) | 119 | COMPAT_SYSCALL_DEFINE2(x86_fstat64, unsigned int, fd, |
120 | struct stat64 __user *, statbuf) | ||
121 | { | 121 | { |
122 | struct kstat stat; | 122 | struct kstat stat; |
123 | int ret = vfs_fstat(fd, &stat); | 123 | int ret = vfs_fstat(fd, &stat); |
@@ -126,8 +126,9 @@ asmlinkage long sys32_fstat64(unsigned int fd, struct stat64 __user *statbuf) | |||
126 | return ret; | 126 | return ret; |
127 | } | 127 | } |
128 | 128 | ||
129 | asmlinkage long sys32_fstatat(unsigned int dfd, const char __user *filename, | 129 | COMPAT_SYSCALL_DEFINE4(x86_fstatat, unsigned int, dfd, |
130 | struct stat64 __user *statbuf, int flag) | 130 | const char __user *, filename, |
131 | struct stat64 __user *, statbuf, int, flag) | ||
131 | { | 132 | { |
132 | struct kstat stat; | 133 | struct kstat stat; |
133 | int error; | 134 | int error; |
@@ -153,7 +154,7 @@ struct mmap_arg_struct32 { | |||
153 | unsigned int offset; | 154 | unsigned int offset; |
154 | }; | 155 | }; |
155 | 156 | ||
156 | asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *arg) | 157 | COMPAT_SYSCALL_DEFINE1(x86_mmap, struct mmap_arg_struct32 __user *, arg) |
157 | { | 158 | { |
158 | struct mmap_arg_struct32 a; | 159 | struct mmap_arg_struct32 a; |
159 | 160 | ||
@@ -167,22 +168,22 @@ asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *arg) | |||
167 | a.offset>>PAGE_SHIFT); | 168 | a.offset>>PAGE_SHIFT); |
168 | } | 169 | } |
169 | 170 | ||
170 | asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr, | 171 | COMPAT_SYSCALL_DEFINE3(x86_waitpid, compat_pid_t, pid, unsigned int __user *, |
171 | int options) | 172 | stat_addr, int, options) |
172 | { | 173 | { |
173 | return compat_sys_wait4(pid, stat_addr, options, NULL); | 174 | return compat_sys_wait4(pid, stat_addr, options, NULL); |
174 | } | 175 | } |
175 | 176 | ||
176 | /* warning: next two assume little endian */ | 177 | /* warning: next two assume little endian */ |
177 | asmlinkage long sys32_pread(unsigned int fd, char __user *ubuf, u32 count, | 178 | COMPAT_SYSCALL_DEFINE5(x86_pread, unsigned int, fd, char __user *, ubuf, |
178 | u32 poslo, u32 poshi) | 179 | u32, count, u32, poslo, u32, poshi) |
179 | { | 180 | { |
180 | return sys_pread64(fd, ubuf, count, | 181 | return sys_pread64(fd, ubuf, count, |
181 | ((loff_t)AA(poshi) << 32) | AA(poslo)); | 182 | ((loff_t)AA(poshi) << 32) | AA(poslo)); |
182 | } | 183 | } |
183 | 184 | ||
184 | asmlinkage long sys32_pwrite(unsigned int fd, const char __user *ubuf, | 185 | COMPAT_SYSCALL_DEFINE5(x86_pwrite, unsigned int, fd, const char __user *, ubuf, |
185 | u32 count, u32 poslo, u32 poshi) | 186 | u32, count, u32, poslo, u32, poshi) |
186 | { | 187 | { |
187 | return sys_pwrite64(fd, ubuf, count, | 188 | return sys_pwrite64(fd, ubuf, count, |
188 | ((loff_t)AA(poshi) << 32) | AA(poslo)); | 189 | ((loff_t)AA(poshi) << 32) | AA(poslo)); |
@@ -193,8 +194,9 @@ asmlinkage long sys32_pwrite(unsigned int fd, const char __user *ubuf, | |||
193 | * Some system calls that need sign extended arguments. This could be | 194 | * Some system calls that need sign extended arguments. This could be |
194 | * done by a generic wrapper. | 195 | * done by a generic wrapper. |
195 | */ | 196 | */ |
196 | long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high, | 197 | COMPAT_SYSCALL_DEFINE6(x86_fadvise64_64, int, fd, __u32, offset_low, |
197 | __u32 len_low, __u32 len_high, int advice) | 198 | __u32, offset_high, __u32, len_low, __u32, len_high, |
199 | int, advice) | ||
198 | { | 200 | { |
199 | return sys_fadvise64_64(fd, | 201 | return sys_fadvise64_64(fd, |
200 | (((u64)offset_high)<<32) | offset_low, | 202 | (((u64)offset_high)<<32) | offset_low, |
@@ -202,31 +204,43 @@ long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high, | |||
202 | advice); | 204 | advice); |
203 | } | 205 | } |
204 | 206 | ||
205 | asmlinkage ssize_t sys32_readahead(int fd, unsigned off_lo, unsigned off_hi, | 207 | COMPAT_SYSCALL_DEFINE4(x86_readahead, int, fd, unsigned int, off_lo, |
206 | size_t count) | 208 | unsigned int, off_hi, size_t, count) |
207 | { | 209 | { |
208 | return sys_readahead(fd, ((u64)off_hi << 32) | off_lo, count); | 210 | return sys_readahead(fd, ((u64)off_hi << 32) | off_lo, count); |
209 | } | 211 | } |
210 | 212 | ||
211 | asmlinkage long sys32_sync_file_range(int fd, unsigned off_low, unsigned off_hi, | 213 | COMPAT_SYSCALL_DEFINE6(x86_sync_file_range, int, fd, unsigned int, off_low, |
212 | unsigned n_low, unsigned n_hi, int flags) | 214 | unsigned int, off_hi, unsigned int, n_low, |
215 | unsigned int, n_hi, int, flags) | ||
213 | { | 216 | { |
214 | return sys_sync_file_range(fd, | 217 | return sys_sync_file_range(fd, |
215 | ((u64)off_hi << 32) | off_low, | 218 | ((u64)off_hi << 32) | off_low, |
216 | ((u64)n_hi << 32) | n_low, flags); | 219 | ((u64)n_hi << 32) | n_low, flags); |
217 | } | 220 | } |
218 | 221 | ||
219 | asmlinkage long sys32_fadvise64(int fd, unsigned offset_lo, unsigned offset_hi, | 222 | COMPAT_SYSCALL_DEFINE5(x86_fadvise64, int, fd, unsigned int, offset_lo, |
220 | size_t len, int advice) | 223 | unsigned int, offset_hi, size_t, len, int, advice) |
221 | { | 224 | { |
222 | return sys_fadvise64_64(fd, ((u64)offset_hi << 32) | offset_lo, | 225 | return sys_fadvise64_64(fd, ((u64)offset_hi << 32) | offset_lo, |
223 | len, advice); | 226 | len, advice); |
224 | } | 227 | } |
225 | 228 | ||
226 | asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_lo, | 229 | COMPAT_SYSCALL_DEFINE6(x86_fallocate, int, fd, int, mode, |
227 | unsigned offset_hi, unsigned len_lo, | 230 | unsigned int, offset_lo, unsigned int, offset_hi, |
228 | unsigned len_hi) | 231 | unsigned int, len_lo, unsigned int, len_hi) |
229 | { | 232 | { |
230 | return sys_fallocate(fd, mode, ((u64)offset_hi << 32) | offset_lo, | 233 | return sys_fallocate(fd, mode, ((u64)offset_hi << 32) | offset_lo, |
231 | ((u64)len_hi << 32) | len_lo); | 234 | ((u64)len_hi << 32) | len_lo); |
232 | } | 235 | } |
236 | |||
237 | /* | ||
238 | * The 32-bit clone ABI is CONFIG_CLONE_BACKWARDS | ||
239 | */ | ||
240 | COMPAT_SYSCALL_DEFINE5(x86_clone, unsigned long, clone_flags, | ||
241 | unsigned long, newsp, int __user *, parent_tidptr, | ||
242 | unsigned long, tls_val, int __user *, child_tidptr) | ||
243 | { | ||
244 | return sys_clone(clone_flags, newsp, parent_tidptr, child_tidptr, | ||
245 | tls_val); | ||
246 | } | ||
diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h index 4d4015ddcf26..c356098b6fb9 100644 --- a/arch/x86/include/asm/apm.h +++ b/arch/x86/include/asm/apm.h | |||
@@ -7,6 +7,8 @@ | |||
7 | #ifndef _ASM_X86_MACH_DEFAULT_APM_H | 7 | #ifndef _ASM_X86_MACH_DEFAULT_APM_H |
8 | #define _ASM_X86_MACH_DEFAULT_APM_H | 8 | #define _ASM_X86_MACH_DEFAULT_APM_H |
9 | 9 | ||
10 | #include <asm/nospec-branch.h> | ||
11 | |||
10 | #ifdef APM_ZERO_SEGS | 12 | #ifdef APM_ZERO_SEGS |
11 | # define APM_DO_ZERO_SEGS \ | 13 | # define APM_DO_ZERO_SEGS \ |
12 | "pushl %%ds\n\t" \ | 14 | "pushl %%ds\n\t" \ |
@@ -32,6 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in, | |||
32 | * N.B. We do NOT need a cld after the BIOS call | 34 | * N.B. We do NOT need a cld after the BIOS call |
33 | * because we always save and restore the flags. | 35 | * because we always save and restore the flags. |
34 | */ | 36 | */ |
37 | firmware_restrict_branch_speculation_start(); | ||
35 | __asm__ __volatile__(APM_DO_ZERO_SEGS | 38 | __asm__ __volatile__(APM_DO_ZERO_SEGS |
36 | "pushl %%edi\n\t" | 39 | "pushl %%edi\n\t" |
37 | "pushl %%ebp\n\t" | 40 | "pushl %%ebp\n\t" |
@@ -44,6 +47,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in, | |||
44 | "=S" (*esi) | 47 | "=S" (*esi) |
45 | : "a" (func), "b" (ebx_in), "c" (ecx_in) | 48 | : "a" (func), "b" (ebx_in), "c" (ecx_in) |
46 | : "memory", "cc"); | 49 | : "memory", "cc"); |
50 | firmware_restrict_branch_speculation_end(); | ||
47 | } | 51 | } |
48 | 52 | ||
49 | static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in, | 53 | static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in, |
@@ -56,6 +60,7 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in, | |||
56 | * N.B. We do NOT need a cld after the BIOS call | 60 | * N.B. We do NOT need a cld after the BIOS call |
57 | * because we always save and restore the flags. | 61 | * because we always save and restore the flags. |
58 | */ | 62 | */ |
63 | firmware_restrict_branch_speculation_start(); | ||
59 | __asm__ __volatile__(APM_DO_ZERO_SEGS | 64 | __asm__ __volatile__(APM_DO_ZERO_SEGS |
60 | "pushl %%edi\n\t" | 65 | "pushl %%edi\n\t" |
61 | "pushl %%ebp\n\t" | 66 | "pushl %%ebp\n\t" |
@@ -68,6 +73,7 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in, | |||
68 | "=S" (si) | 73 | "=S" (si) |
69 | : "a" (func), "b" (ebx_in), "c" (ecx_in) | 74 | : "a" (func), "b" (ebx_in), "c" (ecx_in) |
70 | : "memory", "cc"); | 75 | : "memory", "cc"); |
76 | firmware_restrict_branch_speculation_end(); | ||
71 | return error; | 77 | return error; |
72 | } | 78 | } |
73 | 79 | ||
diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h index 4d111616524b..1908214b9125 100644 --- a/arch/x86/include/asm/asm-prototypes.h +++ b/arch/x86/include/asm/asm-prototypes.h | |||
@@ -38,7 +38,4 @@ INDIRECT_THUNK(dx) | |||
38 | INDIRECT_THUNK(si) | 38 | INDIRECT_THUNK(si) |
39 | INDIRECT_THUNK(di) | 39 | INDIRECT_THUNK(di) |
40 | INDIRECT_THUNK(bp) | 40 | INDIRECT_THUNK(bp) |
41 | asmlinkage void __fill_rsb(void); | ||
42 | asmlinkage void __clear_rsb(void); | ||
43 | |||
44 | #endif /* CONFIG_RETPOLINE */ | 41 | #endif /* CONFIG_RETPOLINE */ |
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index 3fa039855b8f..9f645ba57dbb 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h | |||
@@ -78,7 +78,7 @@ set_bit(long nr, volatile unsigned long *addr) | |||
78 | : "iq" ((u8)CONST_MASK(nr)) | 78 | : "iq" ((u8)CONST_MASK(nr)) |
79 | : "memory"); | 79 | : "memory"); |
80 | } else { | 80 | } else { |
81 | asm volatile(LOCK_PREFIX "bts %1,%0" | 81 | asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0" |
82 | : BITOP_ADDR(addr) : "Ir" (nr) : "memory"); | 82 | : BITOP_ADDR(addr) : "Ir" (nr) : "memory"); |
83 | } | 83 | } |
84 | } | 84 | } |
@@ -94,7 +94,7 @@ set_bit(long nr, volatile unsigned long *addr) | |||
94 | */ | 94 | */ |
95 | static __always_inline void __set_bit(long nr, volatile unsigned long *addr) | 95 | static __always_inline void __set_bit(long nr, volatile unsigned long *addr) |
96 | { | 96 | { |
97 | asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory"); | 97 | asm volatile(__ASM_SIZE(bts) " %1,%0" : ADDR : "Ir" (nr) : "memory"); |
98 | } | 98 | } |
99 | 99 | ||
100 | /** | 100 | /** |
@@ -115,7 +115,7 @@ clear_bit(long nr, volatile unsigned long *addr) | |||
115 | : CONST_MASK_ADDR(nr, addr) | 115 | : CONST_MASK_ADDR(nr, addr) |
116 | : "iq" ((u8)~CONST_MASK(nr))); | 116 | : "iq" ((u8)~CONST_MASK(nr))); |
117 | } else { | 117 | } else { |
118 | asm volatile(LOCK_PREFIX "btr %1,%0" | 118 | asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0" |
119 | : BITOP_ADDR(addr) | 119 | : BITOP_ADDR(addr) |
120 | : "Ir" (nr)); | 120 | : "Ir" (nr)); |
121 | } | 121 | } |
@@ -137,7 +137,7 @@ static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *ad | |||
137 | 137 | ||
138 | static __always_inline void __clear_bit(long nr, volatile unsigned long *addr) | 138 | static __always_inline void __clear_bit(long nr, volatile unsigned long *addr) |
139 | { | 139 | { |
140 | asm volatile("btr %1,%0" : ADDR : "Ir" (nr)); | 140 | asm volatile(__ASM_SIZE(btr) " %1,%0" : ADDR : "Ir" (nr)); |
141 | } | 141 | } |
142 | 142 | ||
143 | static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr) | 143 | static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr) |
@@ -182,7 +182,7 @@ static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long * | |||
182 | */ | 182 | */ |
183 | static __always_inline void __change_bit(long nr, volatile unsigned long *addr) | 183 | static __always_inline void __change_bit(long nr, volatile unsigned long *addr) |
184 | { | 184 | { |
185 | asm volatile("btc %1,%0" : ADDR : "Ir" (nr)); | 185 | asm volatile(__ASM_SIZE(btc) " %1,%0" : ADDR : "Ir" (nr)); |
186 | } | 186 | } |
187 | 187 | ||
188 | /** | 188 | /** |
@@ -201,7 +201,7 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr) | |||
201 | : CONST_MASK_ADDR(nr, addr) | 201 | : CONST_MASK_ADDR(nr, addr) |
202 | : "iq" ((u8)CONST_MASK(nr))); | 202 | : "iq" ((u8)CONST_MASK(nr))); |
203 | } else { | 203 | } else { |
204 | asm volatile(LOCK_PREFIX "btc %1,%0" | 204 | asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0" |
205 | : BITOP_ADDR(addr) | 205 | : BITOP_ADDR(addr) |
206 | : "Ir" (nr)); | 206 | : "Ir" (nr)); |
207 | } | 207 | } |
@@ -217,7 +217,8 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr) | |||
217 | */ | 217 | */ |
218 | static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr) | 218 | static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr) |
219 | { | 219 | { |
220 | GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", c); | 220 | GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), |
221 | *addr, "Ir", nr, "%0", c); | ||
221 | } | 222 | } |
222 | 223 | ||
223 | /** | 224 | /** |
@@ -246,7 +247,7 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long * | |||
246 | { | 247 | { |
247 | bool oldbit; | 248 | bool oldbit; |
248 | 249 | ||
249 | asm("bts %2,%1" | 250 | asm(__ASM_SIZE(bts) " %2,%1" |
250 | CC_SET(c) | 251 | CC_SET(c) |
251 | : CC_OUT(c) (oldbit), ADDR | 252 | : CC_OUT(c) (oldbit), ADDR |
252 | : "Ir" (nr)); | 253 | : "Ir" (nr)); |
@@ -263,7 +264,8 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long * | |||
263 | */ | 264 | */ |
264 | static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr) | 265 | static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr) |
265 | { | 266 | { |
266 | GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", c); | 267 | GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr), |
268 | *addr, "Ir", nr, "%0", c); | ||
267 | } | 269 | } |
268 | 270 | ||
269 | /** | 271 | /** |
@@ -286,7 +288,7 @@ static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long | |||
286 | { | 288 | { |
287 | bool oldbit; | 289 | bool oldbit; |
288 | 290 | ||
289 | asm volatile("btr %2,%1" | 291 | asm volatile(__ASM_SIZE(btr) " %2,%1" |
290 | CC_SET(c) | 292 | CC_SET(c) |
291 | : CC_OUT(c) (oldbit), ADDR | 293 | : CC_OUT(c) (oldbit), ADDR |
292 | : "Ir" (nr)); | 294 | : "Ir" (nr)); |
@@ -298,7 +300,7 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon | |||
298 | { | 300 | { |
299 | bool oldbit; | 301 | bool oldbit; |
300 | 302 | ||
301 | asm volatile("btc %2,%1" | 303 | asm volatile(__ASM_SIZE(btc) " %2,%1" |
302 | CC_SET(c) | 304 | CC_SET(c) |
303 | : CC_OUT(c) (oldbit), ADDR | 305 | : CC_OUT(c) (oldbit), ADDR |
304 | : "Ir" (nr) : "memory"); | 306 | : "Ir" (nr) : "memory"); |
@@ -316,7 +318,8 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon | |||
316 | */ | 318 | */ |
317 | static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr) | 319 | static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr) |
318 | { | 320 | { |
319 | GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", c); | 321 | GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), |
322 | *addr, "Ir", nr, "%0", c); | ||
320 | } | 323 | } |
321 | 324 | ||
322 | static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr) | 325 | static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr) |
@@ -329,7 +332,7 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l | |||
329 | { | 332 | { |
330 | bool oldbit; | 333 | bool oldbit; |
331 | 334 | ||
332 | asm volatile("bt %2,%1" | 335 | asm volatile(__ASM_SIZE(bt) " %2,%1" |
333 | CC_SET(c) | 336 | CC_SET(c) |
334 | : CC_OUT(c) (oldbit) | 337 | : CC_OUT(c) (oldbit) |
335 | : "m" (*(unsigned long *)addr), "Ir" (nr)); | 338 | : "m" (*(unsigned long *)addr), "Ir" (nr)); |
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 0dfe4d3f74e2..d554c11e01ff 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h | |||
@@ -213,6 +213,7 @@ | |||
213 | #define X86_FEATURE_SEV ( 7*32+20) /* AMD Secure Encrypted Virtualization */ | 213 | #define X86_FEATURE_SEV ( 7*32+20) /* AMD Secure Encrypted Virtualization */ |
214 | 214 | ||
215 | #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */ | 215 | #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */ |
216 | #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */ | ||
216 | 217 | ||
217 | /* Virtualization flags: Linux defined, word 8 */ | 218 | /* Virtualization flags: Linux defined, word 8 */ |
218 | #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ | 219 | #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ |
@@ -315,6 +316,7 @@ | |||
315 | #define X86_FEATURE_VPCLMULQDQ (16*32+10) /* Carry-Less Multiplication Double Quadword */ | 316 | #define X86_FEATURE_VPCLMULQDQ (16*32+10) /* Carry-Less Multiplication Double Quadword */ |
316 | #define X86_FEATURE_AVX512_VNNI (16*32+11) /* Vector Neural Network Instructions */ | 317 | #define X86_FEATURE_AVX512_VNNI (16*32+11) /* Vector Neural Network Instructions */ |
317 | #define X86_FEATURE_AVX512_BITALG (16*32+12) /* Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */ | 318 | #define X86_FEATURE_AVX512_BITALG (16*32+12) /* Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */ |
319 | #define X86_FEATURE_TME (16*32+13) /* Intel Total Memory Encryption */ | ||
318 | #define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */ | 320 | #define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */ |
319 | #define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */ | 321 | #define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */ |
320 | #define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */ | 322 | #define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */ |
@@ -327,6 +329,7 @@ | |||
327 | /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */ | 329 | /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */ |
328 | #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */ | 330 | #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */ |
329 | #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */ | 331 | #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */ |
332 | #define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */ | ||
330 | #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ | 333 | #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ |
331 | #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */ | 334 | #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */ |
332 | #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */ | 335 | #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */ |
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 85f6ccb80b91..a399c1ebf6f0 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h | |||
@@ -6,6 +6,7 @@ | |||
6 | #include <asm/pgtable.h> | 6 | #include <asm/pgtable.h> |
7 | #include <asm/processor-flags.h> | 7 | #include <asm/processor-flags.h> |
8 | #include <asm/tlb.h> | 8 | #include <asm/tlb.h> |
9 | #include <asm/nospec-branch.h> | ||
9 | 10 | ||
10 | /* | 11 | /* |
11 | * We map the EFI regions needed for runtime services non-contiguously, | 12 | * We map the EFI regions needed for runtime services non-contiguously, |
@@ -36,8 +37,18 @@ | |||
36 | 37 | ||
37 | extern asmlinkage unsigned long efi_call_phys(void *, ...); | 38 | extern asmlinkage unsigned long efi_call_phys(void *, ...); |
38 | 39 | ||
39 | #define arch_efi_call_virt_setup() kernel_fpu_begin() | 40 | #define arch_efi_call_virt_setup() \ |
40 | #define arch_efi_call_virt_teardown() kernel_fpu_end() | 41 | ({ \ |
42 | kernel_fpu_begin(); \ | ||
43 | firmware_restrict_branch_speculation_start(); \ | ||
44 | }) | ||
45 | |||
46 | #define arch_efi_call_virt_teardown() \ | ||
47 | ({ \ | ||
48 | firmware_restrict_branch_speculation_end(); \ | ||
49 | kernel_fpu_end(); \ | ||
50 | }) | ||
51 | |||
41 | 52 | ||
42 | /* | 53 | /* |
43 | * Wrap all the virtual calls in a way that forces the parameters on the stack. | 54 | * Wrap all the virtual calls in a way that forces the parameters on the stack. |
@@ -73,6 +84,7 @@ struct efi_scratch { | |||
73 | efi_sync_low_kernel_mappings(); \ | 84 | efi_sync_low_kernel_mappings(); \ |
74 | preempt_disable(); \ | 85 | preempt_disable(); \ |
75 | __kernel_fpu_begin(); \ | 86 | __kernel_fpu_begin(); \ |
87 | firmware_restrict_branch_speculation_start(); \ | ||
76 | \ | 88 | \ |
77 | if (efi_scratch.use_pgd) { \ | 89 | if (efi_scratch.use_pgd) { \ |
78 | efi_scratch.prev_cr3 = __read_cr3(); \ | 90 | efi_scratch.prev_cr3 = __read_cr3(); \ |
@@ -91,6 +103,7 @@ struct efi_scratch { | |||
91 | __flush_tlb_all(); \ | 103 | __flush_tlb_all(); \ |
92 | } \ | 104 | } \ |
93 | \ | 105 | \ |
106 | firmware_restrict_branch_speculation_end(); \ | ||
94 | __kernel_fpu_end(); \ | 107 | __kernel_fpu_end(); \ |
95 | preempt_enable(); \ | 108 | preempt_enable(); \ |
96 | }) | 109 | }) |
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index dd6f57a54a26..b605a5b6a30c 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
@@ -507,6 +507,7 @@ struct kvm_vcpu_arch { | |||
507 | u64 smi_count; | 507 | u64 smi_count; |
508 | bool tpr_access_reporting; | 508 | bool tpr_access_reporting; |
509 | u64 ia32_xss; | 509 | u64 ia32_xss; |
510 | u64 microcode_version; | ||
510 | 511 | ||
511 | /* | 512 | /* |
512 | * Paging state of the vcpu | 513 | * Paging state of the vcpu |
@@ -1095,6 +1096,8 @@ struct kvm_x86_ops { | |||
1095 | int (*mem_enc_op)(struct kvm *kvm, void __user *argp); | 1096 | int (*mem_enc_op)(struct kvm *kvm, void __user *argp); |
1096 | int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp); | 1097 | int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp); |
1097 | int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp); | 1098 | int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp); |
1099 | |||
1100 | int (*get_msr_feature)(struct kvm_msr_entry *entry); | ||
1098 | }; | 1101 | }; |
1099 | 1102 | ||
1100 | struct kvm_arch_async_pf { | 1103 | struct kvm_arch_async_pf { |
@@ -1464,7 +1467,4 @@ static inline int kvm_cpu_get_apicid(int mps_cpu) | |||
1464 | #define put_smstate(type, buf, offset, val) \ | 1467 | #define put_smstate(type, buf, offset, val) \ |
1465 | *(type *)((buf) + (offset) - 0x7e00) = val | 1468 | *(type *)((buf) + (offset) - 0x7e00) = val |
1466 | 1469 | ||
1467 | void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, | ||
1468 | unsigned long start, unsigned long end); | ||
1469 | |||
1470 | #endif /* _ASM_X86_KVM_HOST_H */ | 1470 | #endif /* _ASM_X86_KVM_HOST_H */ |
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index 55520cec8b27..6cf0e4cb7b97 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h | |||
@@ -37,7 +37,13 @@ struct cpu_signature { | |||
37 | 37 | ||
38 | struct device; | 38 | struct device; |
39 | 39 | ||
40 | enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND }; | 40 | enum ucode_state { |
41 | UCODE_OK = 0, | ||
42 | UCODE_NEW, | ||
43 | UCODE_UPDATED, | ||
44 | UCODE_NFOUND, | ||
45 | UCODE_ERROR, | ||
46 | }; | ||
41 | 47 | ||
42 | struct microcode_ops { | 48 | struct microcode_ops { |
43 | enum ucode_state (*request_microcode_user) (int cpu, | 49 | enum ucode_state (*request_microcode_user) (int cpu, |
@@ -54,7 +60,7 @@ struct microcode_ops { | |||
54 | * are being called. | 60 | * are being called. |
55 | * See also the "Synchronization" section in microcode_core.c. | 61 | * See also the "Synchronization" section in microcode_core.c. |
56 | */ | 62 | */ |
57 | int (*apply_microcode) (int cpu); | 63 | enum ucode_state (*apply_microcode) (int cpu); |
58 | int (*collect_cpu_info) (int cpu, struct cpu_signature *csig); | 64 | int (*collect_cpu_info) (int cpu, struct cpu_signature *csig); |
59 | }; | 65 | }; |
60 | 66 | ||
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index c931b88982a0..1de72ce514cd 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h | |||
@@ -74,6 +74,7 @@ static inline void *ldt_slot_va(int slot) | |||
74 | return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot); | 74 | return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot); |
75 | #else | 75 | #else |
76 | BUG(); | 76 | BUG(); |
77 | return (void *)fix_to_virt(FIX_HOLE); | ||
77 | #endif | 78 | #endif |
78 | } | 79 | } |
79 | 80 | ||
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 81a1be326571..f928ad9b143f 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h | |||
@@ -8,6 +8,50 @@ | |||
8 | #include <asm/cpufeatures.h> | 8 | #include <asm/cpufeatures.h> |
9 | #include <asm/msr-index.h> | 9 | #include <asm/msr-index.h> |
10 | 10 | ||
11 | /* | ||
12 | * Fill the CPU return stack buffer. | ||
13 | * | ||
14 | * Each entry in the RSB, if used for a speculative 'ret', contains an | ||
15 | * infinite 'pause; lfence; jmp' loop to capture speculative execution. | ||
16 | * | ||
17 | * This is required in various cases for retpoline and IBRS-based | ||
18 | * mitigations for the Spectre variant 2 vulnerability. Sometimes to | ||
19 | * eliminate potentially bogus entries from the RSB, and sometimes | ||
20 | * purely to ensure that it doesn't get empty, which on some CPUs would | ||
21 | * allow predictions from other (unwanted!) sources to be used. | ||
22 | * | ||
23 | * We define a CPP macro such that it can be used from both .S files and | ||
24 | * inline assembly. It's possible to do a .macro and then include that | ||
25 | * from C via asm(".include <asm/nospec-branch.h>") but let's not go there. | ||
26 | */ | ||
27 | |||
28 | #define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */ | ||
29 | #define RSB_FILL_LOOPS 16 /* To avoid underflow */ | ||
30 | |||
31 | /* | ||
32 | * Google experimented with loop-unrolling and this turned out to be | ||
33 | * the optimal version — two calls, each with their own speculation | ||
34 | * trap should their return address end up getting used, in a loop. | ||
35 | */ | ||
36 | #define __FILL_RETURN_BUFFER(reg, nr, sp) \ | ||
37 | mov $(nr/2), reg; \ | ||
38 | 771: \ | ||
39 | call 772f; \ | ||
40 | 773: /* speculation trap */ \ | ||
41 | pause; \ | ||
42 | lfence; \ | ||
43 | jmp 773b; \ | ||
44 | 772: \ | ||
45 | call 774f; \ | ||
46 | 775: /* speculation trap */ \ | ||
47 | pause; \ | ||
48 | lfence; \ | ||
49 | jmp 775b; \ | ||
50 | 774: \ | ||
51 | dec reg; \ | ||
52 | jnz 771b; \ | ||
53 | add $(BITS_PER_LONG/8) * nr, sp; | ||
54 | |||
11 | #ifdef __ASSEMBLY__ | 55 | #ifdef __ASSEMBLY__ |
12 | 56 | ||
13 | /* | 57 | /* |
@@ -24,6 +68,18 @@ | |||
24 | .endm | 68 | .endm |
25 | 69 | ||
26 | /* | 70 | /* |
71 | * This should be used immediately before an indirect jump/call. It tells | ||
72 | * objtool the subsequent indirect jump/call is vouched safe for retpoline | ||
73 | * builds. | ||
74 | */ | ||
75 | .macro ANNOTATE_RETPOLINE_SAFE | ||
76 | .Lannotate_\@: | ||
77 | .pushsection .discard.retpoline_safe | ||
78 | _ASM_PTR .Lannotate_\@ | ||
79 | .popsection | ||
80 | .endm | ||
81 | |||
82 | /* | ||
27 | * These are the bare retpoline primitives for indirect jmp and call. | 83 | * These are the bare retpoline primitives for indirect jmp and call. |
28 | * Do not use these directly; they only exist to make the ALTERNATIVE | 84 | * Do not use these directly; they only exist to make the ALTERNATIVE |
29 | * invocation below less ugly. | 85 | * invocation below less ugly. |
@@ -59,9 +115,9 @@ | |||
59 | .macro JMP_NOSPEC reg:req | 115 | .macro JMP_NOSPEC reg:req |
60 | #ifdef CONFIG_RETPOLINE | 116 | #ifdef CONFIG_RETPOLINE |
61 | ANNOTATE_NOSPEC_ALTERNATIVE | 117 | ANNOTATE_NOSPEC_ALTERNATIVE |
62 | ALTERNATIVE_2 __stringify(jmp *\reg), \ | 118 | ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *\reg), \ |
63 | __stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE, \ | 119 | __stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE, \ |
64 | __stringify(lfence; jmp *\reg), X86_FEATURE_RETPOLINE_AMD | 120 | __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *\reg), X86_FEATURE_RETPOLINE_AMD |
65 | #else | 121 | #else |
66 | jmp *\reg | 122 | jmp *\reg |
67 | #endif | 123 | #endif |
@@ -70,18 +126,25 @@ | |||
70 | .macro CALL_NOSPEC reg:req | 126 | .macro CALL_NOSPEC reg:req |
71 | #ifdef CONFIG_RETPOLINE | 127 | #ifdef CONFIG_RETPOLINE |
72 | ANNOTATE_NOSPEC_ALTERNATIVE | 128 | ANNOTATE_NOSPEC_ALTERNATIVE |
73 | ALTERNATIVE_2 __stringify(call *\reg), \ | 129 | ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *\reg), \ |
74 | __stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\ | 130 | __stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\ |
75 | __stringify(lfence; call *\reg), X86_FEATURE_RETPOLINE_AMD | 131 | __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *\reg), X86_FEATURE_RETPOLINE_AMD |
76 | #else | 132 | #else |
77 | call *\reg | 133 | call *\reg |
78 | #endif | 134 | #endif |
79 | .endm | 135 | .endm |
80 | 136 | ||
81 | /* This clobbers the BX register */ | 137 | /* |
82 | .macro FILL_RETURN_BUFFER nr:req ftr:req | 138 | * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP |
139 | * monstrosity above, manually. | ||
140 | */ | ||
141 | .macro FILL_RETURN_BUFFER reg:req nr:req ftr:req | ||
83 | #ifdef CONFIG_RETPOLINE | 142 | #ifdef CONFIG_RETPOLINE |
84 | ALTERNATIVE "", "call __clear_rsb", \ftr | 143 | ANNOTATE_NOSPEC_ALTERNATIVE |
144 | ALTERNATIVE "jmp .Lskip_rsb_\@", \ | ||
145 | __stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)) \ | ||
146 | \ftr | ||
147 | .Lskip_rsb_\@: | ||
85 | #endif | 148 | #endif |
86 | .endm | 149 | .endm |
87 | 150 | ||
@@ -93,6 +156,12 @@ | |||
93 | ".long 999b - .\n\t" \ | 156 | ".long 999b - .\n\t" \ |
94 | ".popsection\n\t" | 157 | ".popsection\n\t" |
95 | 158 | ||
159 | #define ANNOTATE_RETPOLINE_SAFE \ | ||
160 | "999:\n\t" \ | ||
161 | ".pushsection .discard.retpoline_safe\n\t" \ | ||
162 | _ASM_PTR " 999b\n\t" \ | ||
163 | ".popsection\n\t" | ||
164 | |||
96 | #if defined(CONFIG_X86_64) && defined(RETPOLINE) | 165 | #if defined(CONFIG_X86_64) && defined(RETPOLINE) |
97 | 166 | ||
98 | /* | 167 | /* |
@@ -102,6 +171,7 @@ | |||
102 | # define CALL_NOSPEC \ | 171 | # define CALL_NOSPEC \ |
103 | ANNOTATE_NOSPEC_ALTERNATIVE \ | 172 | ANNOTATE_NOSPEC_ALTERNATIVE \ |
104 | ALTERNATIVE( \ | 173 | ALTERNATIVE( \ |
174 | ANNOTATE_RETPOLINE_SAFE \ | ||
105 | "call *%[thunk_target]\n", \ | 175 | "call *%[thunk_target]\n", \ |
106 | "call __x86_indirect_thunk_%V[thunk_target]\n", \ | 176 | "call __x86_indirect_thunk_%V[thunk_target]\n", \ |
107 | X86_FEATURE_RETPOLINE) | 177 | X86_FEATURE_RETPOLINE) |
@@ -113,7 +183,10 @@ | |||
113 | * otherwise we'll run out of registers. We don't care about CET | 183 | * otherwise we'll run out of registers. We don't care about CET |
114 | * here, anyway. | 184 | * here, anyway. |
115 | */ | 185 | */ |
116 | # define CALL_NOSPEC ALTERNATIVE("call *%[thunk_target]\n", \ | 186 | # define CALL_NOSPEC \ |
187 | ALTERNATIVE( \ | ||
188 | ANNOTATE_RETPOLINE_SAFE \ | ||
189 | "call *%[thunk_target]\n", \ | ||
117 | " jmp 904f;\n" \ | 190 | " jmp 904f;\n" \ |
118 | " .align 16\n" \ | 191 | " .align 16\n" \ |
119 | "901: call 903f;\n" \ | 192 | "901: call 903f;\n" \ |
@@ -156,26 +229,54 @@ extern char __indirect_thunk_end[]; | |||
156 | static inline void vmexit_fill_RSB(void) | 229 | static inline void vmexit_fill_RSB(void) |
157 | { | 230 | { |
158 | #ifdef CONFIG_RETPOLINE | 231 | #ifdef CONFIG_RETPOLINE |
159 | alternative_input("", | 232 | unsigned long loops; |
160 | "call __fill_rsb", | 233 | |
161 | X86_FEATURE_RETPOLINE, | 234 | asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE |
162 | ASM_NO_INPUT_CLOBBER(_ASM_BX, "memory")); | 235 | ALTERNATIVE("jmp 910f", |
236 | __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)), | ||
237 | X86_FEATURE_RETPOLINE) | ||
238 | "910:" | ||
239 | : "=r" (loops), ASM_CALL_CONSTRAINT | ||
240 | : : "memory" ); | ||
163 | #endif | 241 | #endif |
164 | } | 242 | } |
165 | 243 | ||
244 | #define alternative_msr_write(_msr, _val, _feature) \ | ||
245 | asm volatile(ALTERNATIVE("", \ | ||
246 | "movl %[msr], %%ecx\n\t" \ | ||
247 | "movl %[val], %%eax\n\t" \ | ||
248 | "movl $0, %%edx\n\t" \ | ||
249 | "wrmsr", \ | ||
250 | _feature) \ | ||
251 | : : [msr] "i" (_msr), [val] "i" (_val) \ | ||
252 | : "eax", "ecx", "edx", "memory") | ||
253 | |||
166 | static inline void indirect_branch_prediction_barrier(void) | 254 | static inline void indirect_branch_prediction_barrier(void) |
167 | { | 255 | { |
168 | asm volatile(ALTERNATIVE("", | 256 | alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, |
169 | "movl %[msr], %%ecx\n\t" | 257 | X86_FEATURE_USE_IBPB); |
170 | "movl %[val], %%eax\n\t" | ||
171 | "movl $0, %%edx\n\t" | ||
172 | "wrmsr", | ||
173 | X86_FEATURE_USE_IBPB) | ||
174 | : : [msr] "i" (MSR_IA32_PRED_CMD), | ||
175 | [val] "i" (PRED_CMD_IBPB) | ||
176 | : "eax", "ecx", "edx", "memory"); | ||
177 | } | 258 | } |
178 | 259 | ||
260 | /* | ||
261 | * With retpoline, we must use IBRS to restrict branch prediction | ||
262 | * before calling into firmware. | ||
263 | * | ||
264 | * (Implemented as CPP macros due to header hell.) | ||
265 | */ | ||
266 | #define firmware_restrict_branch_speculation_start() \ | ||
267 | do { \ | ||
268 | preempt_disable(); \ | ||
269 | alternative_msr_write(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS, \ | ||
270 | X86_FEATURE_USE_IBRS_FW); \ | ||
271 | } while (0) | ||
272 | |||
273 | #define firmware_restrict_branch_speculation_end() \ | ||
274 | do { \ | ||
275 | alternative_msr_write(MSR_IA32_SPEC_CTRL, 0, \ | ||
276 | X86_FEATURE_USE_IBRS_FW); \ | ||
277 | preempt_enable(); \ | ||
278 | } while (0) | ||
279 | |||
179 | #endif /* __ASSEMBLY__ */ | 280 | #endif /* __ASSEMBLY__ */ |
180 | 281 | ||
181 | /* | 282 | /* |
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 554841fab717..c83a2f418cea 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h | |||
@@ -7,6 +7,7 @@ | |||
7 | #ifdef CONFIG_PARAVIRT | 7 | #ifdef CONFIG_PARAVIRT |
8 | #include <asm/pgtable_types.h> | 8 | #include <asm/pgtable_types.h> |
9 | #include <asm/asm.h> | 9 | #include <asm/asm.h> |
10 | #include <asm/nospec-branch.h> | ||
10 | 11 | ||
11 | #include <asm/paravirt_types.h> | 12 | #include <asm/paravirt_types.h> |
12 | 13 | ||
@@ -879,23 +880,27 @@ extern void default_banner(void); | |||
879 | 880 | ||
880 | #define INTERRUPT_RETURN \ | 881 | #define INTERRUPT_RETURN \ |
881 | PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \ | 882 | PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \ |
882 | jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret)) | 883 | ANNOTATE_RETPOLINE_SAFE; \ |
884 | jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret);) | ||
883 | 885 | ||
884 | #define DISABLE_INTERRUPTS(clobbers) \ | 886 | #define DISABLE_INTERRUPTS(clobbers) \ |
885 | PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \ | 887 | PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \ |
886 | PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \ | 888 | PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \ |
889 | ANNOTATE_RETPOLINE_SAFE; \ | ||
887 | call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \ | 890 | call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \ |
888 | PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) | 891 | PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) |
889 | 892 | ||
890 | #define ENABLE_INTERRUPTS(clobbers) \ | 893 | #define ENABLE_INTERRUPTS(clobbers) \ |
891 | PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \ | 894 | PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \ |
892 | PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \ | 895 | PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \ |
896 | ANNOTATE_RETPOLINE_SAFE; \ | ||
893 | call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \ | 897 | call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \ |
894 | PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) | 898 | PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) |
895 | 899 | ||
896 | #ifdef CONFIG_X86_32 | 900 | #ifdef CONFIG_X86_32 |
897 | #define GET_CR0_INTO_EAX \ | 901 | #define GET_CR0_INTO_EAX \ |
898 | push %ecx; push %edx; \ | 902 | push %ecx; push %edx; \ |
903 | ANNOTATE_RETPOLINE_SAFE; \ | ||
899 | call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \ | 904 | call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \ |
900 | pop %edx; pop %ecx | 905 | pop %edx; pop %ecx |
901 | #else /* !CONFIG_X86_32 */ | 906 | #else /* !CONFIG_X86_32 */ |
@@ -917,21 +922,25 @@ extern void default_banner(void); | |||
917 | */ | 922 | */ |
918 | #define SWAPGS \ | 923 | #define SWAPGS \ |
919 | PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \ | 924 | PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \ |
920 | call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs) \ | 925 | ANNOTATE_RETPOLINE_SAFE; \ |
926 | call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs); \ | ||
921 | ) | 927 | ) |
922 | 928 | ||
923 | #define GET_CR2_INTO_RAX \ | 929 | #define GET_CR2_INTO_RAX \ |
924 | call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2) | 930 | ANNOTATE_RETPOLINE_SAFE; \ |
931 | call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2); | ||
925 | 932 | ||
926 | #define USERGS_SYSRET64 \ | 933 | #define USERGS_SYSRET64 \ |
927 | PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \ | 934 | PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \ |
928 | CLBR_NONE, \ | 935 | CLBR_NONE, \ |
929 | jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64)) | 936 | ANNOTATE_RETPOLINE_SAFE; \ |
937 | jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64);) | ||
930 | 938 | ||
931 | #ifdef CONFIG_DEBUG_ENTRY | 939 | #ifdef CONFIG_DEBUG_ENTRY |
932 | #define SAVE_FLAGS(clobbers) \ | 940 | #define SAVE_FLAGS(clobbers) \ |
933 | PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_save_fl), clobbers, \ | 941 | PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_save_fl), clobbers, \ |
934 | PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \ | 942 | PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \ |
943 | ANNOTATE_RETPOLINE_SAFE; \ | ||
935 | call PARA_INDIRECT(pv_irq_ops+PV_IRQ_save_fl); \ | 944 | call PARA_INDIRECT(pv_irq_ops+PV_IRQ_save_fl); \ |
936 | PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) | 945 | PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) |
937 | #endif | 946 | #endif |
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index f624f1f10316..180bc0bff0fb 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h | |||
@@ -43,6 +43,7 @@ | |||
43 | #include <asm/desc_defs.h> | 43 | #include <asm/desc_defs.h> |
44 | #include <asm/kmap_types.h> | 44 | #include <asm/kmap_types.h> |
45 | #include <asm/pgtable_types.h> | 45 | #include <asm/pgtable_types.h> |
46 | #include <asm/nospec-branch.h> | ||
46 | 47 | ||
47 | struct page; | 48 | struct page; |
48 | struct thread_struct; | 49 | struct thread_struct; |
@@ -392,7 +393,9 @@ int paravirt_disable_iospace(void); | |||
392 | * offset into the paravirt_patch_template structure, and can therefore be | 393 | * offset into the paravirt_patch_template structure, and can therefore be |
393 | * freely converted back into a structure offset. | 394 | * freely converted back into a structure offset. |
394 | */ | 395 | */ |
395 | #define PARAVIRT_CALL "call *%c[paravirt_opptr];" | 396 | #define PARAVIRT_CALL \ |
397 | ANNOTATE_RETPOLINE_SAFE \ | ||
398 | "call *%c[paravirt_opptr];" | ||
396 | 399 | ||
397 | /* | 400 | /* |
398 | * These macros are intended to wrap calls through one of the paravirt | 401 | * These macros are intended to wrap calls through one of the paravirt |
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index ba3c523aaf16..a06b07399d17 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h | |||
@@ -526,7 +526,7 @@ static inline bool x86_this_cpu_variable_test_bit(int nr, | |||
526 | { | 526 | { |
527 | bool oldbit; | 527 | bool oldbit; |
528 | 528 | ||
529 | asm volatile("bt "__percpu_arg(2)",%1" | 529 | asm volatile("btl "__percpu_arg(2)",%1" |
530 | CC_SET(c) | 530 | CC_SET(c) |
531 | : CC_OUT(c) (oldbit) | 531 | : CC_OUT(c) (oldbit) |
532 | : "m" (*(unsigned long __percpu *)addr), "Ir" (nr)); | 532 | : "m" (*(unsigned long __percpu *)addr), "Ir" (nr)); |
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 63c2552b6b65..b444d83cfc95 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h | |||
@@ -350,14 +350,14 @@ static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set) | |||
350 | { | 350 | { |
351 | pmdval_t v = native_pmd_val(pmd); | 351 | pmdval_t v = native_pmd_val(pmd); |
352 | 352 | ||
353 | return __pmd(v | set); | 353 | return native_make_pmd(v | set); |
354 | } | 354 | } |
355 | 355 | ||
356 | static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear) | 356 | static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear) |
357 | { | 357 | { |
358 | pmdval_t v = native_pmd_val(pmd); | 358 | pmdval_t v = native_pmd_val(pmd); |
359 | 359 | ||
360 | return __pmd(v & ~clear); | 360 | return native_make_pmd(v & ~clear); |
361 | } | 361 | } |
362 | 362 | ||
363 | static inline pmd_t pmd_mkold(pmd_t pmd) | 363 | static inline pmd_t pmd_mkold(pmd_t pmd) |
@@ -409,14 +409,14 @@ static inline pud_t pud_set_flags(pud_t pud, pudval_t set) | |||
409 | { | 409 | { |
410 | pudval_t v = native_pud_val(pud); | 410 | pudval_t v = native_pud_val(pud); |
411 | 411 | ||
412 | return __pud(v | set); | 412 | return native_make_pud(v | set); |
413 | } | 413 | } |
414 | 414 | ||
415 | static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear) | 415 | static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear) |
416 | { | 416 | { |
417 | pudval_t v = native_pud_val(pud); | 417 | pudval_t v = native_pud_val(pud); |
418 | 418 | ||
419 | return __pud(v & ~clear); | 419 | return native_make_pud(v & ~clear); |
420 | } | 420 | } |
421 | 421 | ||
422 | static inline pud_t pud_mkold(pud_t pud) | 422 | static inline pud_t pud_mkold(pud_t pud) |
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index e55466760ff8..b3ec519e3982 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h | |||
@@ -32,6 +32,7 @@ extern pmd_t initial_pg_pmd[]; | |||
32 | static inline void pgtable_cache_init(void) { } | 32 | static inline void pgtable_cache_init(void) { } |
33 | static inline void check_pgt_cache(void) { } | 33 | static inline void check_pgt_cache(void) { } |
34 | void paging_init(void); | 34 | void paging_init(void); |
35 | void sync_initial_page_table(void); | ||
35 | 36 | ||
36 | /* | 37 | /* |
37 | * Define this if things work differently on an i386 and an i486: | 38 | * Define this if things work differently on an i386 and an i486: |
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 81462e9a34f6..1149d2112b2e 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h | |||
@@ -28,6 +28,7 @@ extern pgd_t init_top_pgt[]; | |||
28 | #define swapper_pg_dir init_top_pgt | 28 | #define swapper_pg_dir init_top_pgt |
29 | 29 | ||
30 | extern void paging_init(void); | 30 | extern void paging_init(void); |
31 | static inline void sync_initial_page_table(void) { } | ||
31 | 32 | ||
32 | #define pte_ERROR(e) \ | 33 | #define pte_ERROR(e) \ |
33 | pr_err("%s:%d: bad pte %p(%016lx)\n", \ | 34 | pr_err("%s:%d: bad pte %p(%016lx)\n", \ |
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index 3696398a9475..acfe755562a6 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h | |||
@@ -174,7 +174,6 @@ enum page_cache_mode { | |||
174 | #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW) | 174 | #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW) |
175 | #define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW) | 175 | #define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW) |
176 | #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_NOCACHE) | 176 | #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_NOCACHE) |
177 | #define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER) | ||
178 | #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER) | 177 | #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER) |
179 | #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE) | 178 | #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE) |
180 | #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE) | 179 | #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE) |
@@ -206,7 +205,6 @@ enum page_cache_mode { | |||
206 | #define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE | _PAGE_ENC) | 205 | #define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE | _PAGE_ENC) |
207 | #define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE | _PAGE_ENC) | 206 | #define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE | _PAGE_ENC) |
208 | #define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC | _PAGE_ENC) | 207 | #define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC | _PAGE_ENC) |
209 | #define PAGE_KERNEL_VSYSCALL __pgprot(__PAGE_KERNEL_VSYSCALL | _PAGE_ENC) | ||
210 | #define PAGE_KERNEL_VVAR __pgprot(__PAGE_KERNEL_VVAR | _PAGE_ENC) | 208 | #define PAGE_KERNEL_VVAR __pgprot(__PAGE_KERNEL_VVAR | _PAGE_ENC) |
211 | 209 | ||
212 | #define PAGE_KERNEL_IO __pgprot(__PAGE_KERNEL_IO) | 210 | #define PAGE_KERNEL_IO __pgprot(__PAGE_KERNEL_IO) |
@@ -323,6 +321,11 @@ static inline pudval_t native_pud_val(pud_t pud) | |||
323 | #else | 321 | #else |
324 | #include <asm-generic/pgtable-nopud.h> | 322 | #include <asm-generic/pgtable-nopud.h> |
325 | 323 | ||
324 | static inline pud_t native_make_pud(pudval_t val) | ||
325 | { | ||
326 | return (pud_t) { .p4d.pgd = native_make_pgd(val) }; | ||
327 | } | ||
328 | |||
326 | static inline pudval_t native_pud_val(pud_t pud) | 329 | static inline pudval_t native_pud_val(pud_t pud) |
327 | { | 330 | { |
328 | return native_pgd_val(pud.p4d.pgd); | 331 | return native_pgd_val(pud.p4d.pgd); |
@@ -344,6 +347,11 @@ static inline pmdval_t native_pmd_val(pmd_t pmd) | |||
344 | #else | 347 | #else |
345 | #include <asm-generic/pgtable-nopmd.h> | 348 | #include <asm-generic/pgtable-nopmd.h> |
346 | 349 | ||
350 | static inline pmd_t native_make_pmd(pmdval_t val) | ||
351 | { | ||
352 | return (pmd_t) { .pud.p4d.pgd = native_make_pgd(val) }; | ||
353 | } | ||
354 | |||
347 | static inline pmdval_t native_pmd_val(pmd_t pmd) | 355 | static inline pmdval_t native_pmd_val(pmd_t pmd) |
348 | { | 356 | { |
349 | return native_pgd_val(pmd.pud.p4d.pgd); | 357 | return native_pgd_val(pmd.pud.p4d.pgd); |
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 1bd9ed87606f..b0ccd4847a58 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h | |||
@@ -977,4 +977,5 @@ bool xen_set_default_idle(void); | |||
977 | 977 | ||
978 | void stop_this_cpu(void *dummy); | 978 | void stop_this_cpu(void *dummy); |
979 | void df_debug(struct pt_regs *regs, long error_code); | 979 | void df_debug(struct pt_regs *regs, long error_code); |
980 | void microcode_check(void); | ||
980 | #endif /* _ASM_X86_PROCESSOR_H */ | 981 | #endif /* _ASM_X86_PROCESSOR_H */ |
diff --git a/arch/x86/include/asm/refcount.h b/arch/x86/include/asm/refcount.h index 4e44250e7d0d..4cf11d88d3b3 100644 --- a/arch/x86/include/asm/refcount.h +++ b/arch/x86/include/asm/refcount.h | |||
@@ -17,7 +17,7 @@ | |||
17 | #define _REFCOUNT_EXCEPTION \ | 17 | #define _REFCOUNT_EXCEPTION \ |
18 | ".pushsection .text..refcount\n" \ | 18 | ".pushsection .text..refcount\n" \ |
19 | "111:\tlea %[counter], %%" _ASM_CX "\n" \ | 19 | "111:\tlea %[counter], %%" _ASM_CX "\n" \ |
20 | "112:\t" ASM_UD0 "\n" \ | 20 | "112:\t" ASM_UD2 "\n" \ |
21 | ASM_UNREACHABLE \ | 21 | ASM_UNREACHABLE \ |
22 | ".popsection\n" \ | 22 | ".popsection\n" \ |
23 | "113:\n" \ | 23 | "113:\n" \ |
@@ -67,13 +67,13 @@ static __always_inline __must_check | |||
67 | bool refcount_sub_and_test(unsigned int i, refcount_t *r) | 67 | bool refcount_sub_and_test(unsigned int i, refcount_t *r) |
68 | { | 68 | { |
69 | GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl", REFCOUNT_CHECK_LT_ZERO, | 69 | GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl", REFCOUNT_CHECK_LT_ZERO, |
70 | r->refs.counter, "er", i, "%0", e); | 70 | r->refs.counter, "er", i, "%0", e, "cx"); |
71 | } | 71 | } |
72 | 72 | ||
73 | static __always_inline __must_check bool refcount_dec_and_test(refcount_t *r) | 73 | static __always_inline __must_check bool refcount_dec_and_test(refcount_t *r) |
74 | { | 74 | { |
75 | GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl", REFCOUNT_CHECK_LT_ZERO, | 75 | GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl", REFCOUNT_CHECK_LT_ZERO, |
76 | r->refs.counter, "%0", e); | 76 | r->refs.counter, "%0", e, "cx"); |
77 | } | 77 | } |
78 | 78 | ||
79 | static __always_inline __must_check | 79 | static __always_inline __must_check |
diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h index f91c365e57c3..4914a3e7c803 100644 --- a/arch/x86/include/asm/rmwcc.h +++ b/arch/x86/include/asm/rmwcc.h | |||
@@ -2,8 +2,7 @@ | |||
2 | #ifndef _ASM_X86_RMWcc | 2 | #ifndef _ASM_X86_RMWcc |
3 | #define _ASM_X86_RMWcc | 3 | #define _ASM_X86_RMWcc |
4 | 4 | ||
5 | #define __CLOBBERS_MEM "memory" | 5 | #define __CLOBBERS_MEM(clb...) "memory", ## clb |
6 | #define __CLOBBERS_MEM_CC_CX "memory", "cc", "cx" | ||
7 | 6 | ||
8 | #if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CC_HAVE_ASM_GOTO) | 7 | #if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CC_HAVE_ASM_GOTO) |
9 | 8 | ||
@@ -40,18 +39,19 @@ do { \ | |||
40 | #endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */ | 39 | #endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */ |
41 | 40 | ||
42 | #define GEN_UNARY_RMWcc(op, var, arg0, cc) \ | 41 | #define GEN_UNARY_RMWcc(op, var, arg0, cc) \ |
43 | __GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM) | 42 | __GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM()) |
44 | 43 | ||
45 | #define GEN_UNARY_SUFFIXED_RMWcc(op, suffix, var, arg0, cc) \ | 44 | #define GEN_UNARY_SUFFIXED_RMWcc(op, suffix, var, arg0, cc, clobbers...)\ |
46 | __GEN_RMWcc(op " " arg0 "\n\t" suffix, var, cc, \ | 45 | __GEN_RMWcc(op " " arg0 "\n\t" suffix, var, cc, \ |
47 | __CLOBBERS_MEM_CC_CX) | 46 | __CLOBBERS_MEM(clobbers)) |
48 | 47 | ||
49 | #define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \ | 48 | #define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \ |
50 | __GEN_RMWcc(op __BINARY_RMWcc_ARG arg0, var, cc, \ | 49 | __GEN_RMWcc(op __BINARY_RMWcc_ARG arg0, var, cc, \ |
51 | __CLOBBERS_MEM, vcon (val)) | 50 | __CLOBBERS_MEM(), vcon (val)) |
52 | 51 | ||
53 | #define GEN_BINARY_SUFFIXED_RMWcc(op, suffix, var, vcon, val, arg0, cc) \ | 52 | #define GEN_BINARY_SUFFIXED_RMWcc(op, suffix, var, vcon, val, arg0, cc, \ |
53 | clobbers...) \ | ||
54 | __GEN_RMWcc(op __BINARY_RMWcc_ARG arg0 "\n\t" suffix, var, cc, \ | 54 | __GEN_RMWcc(op __BINARY_RMWcc_ARG arg0 "\n\t" suffix, var, cc, \ |
55 | __CLOBBERS_MEM_CC_CX, vcon (val)) | 55 | __CLOBBERS_MEM(clobbers), vcon (val)) |
56 | 56 | ||
57 | #endif /* _ASM_X86_RMWcc */ | 57 | #endif /* _ASM_X86_RMWcc */ |
diff --git a/arch/x86/include/asm/sections.h b/arch/x86/include/asm/sections.h index d6baf23782bc..5c019d23d06b 100644 --- a/arch/x86/include/asm/sections.h +++ b/arch/x86/include/asm/sections.h | |||
@@ -10,6 +10,7 @@ extern struct exception_table_entry __stop___ex_table[]; | |||
10 | 10 | ||
11 | #if defined(CONFIG_X86_64) | 11 | #if defined(CONFIG_X86_64) |
12 | extern char __end_rodata_hpage_align[]; | 12 | extern char __end_rodata_hpage_align[]; |
13 | extern char __entry_trampoline_start[], __entry_trampoline_end[]; | ||
13 | #endif | 14 | #endif |
14 | 15 | ||
15 | #endif /* _ASM_X86_SECTIONS_H */ | 16 | #endif /* _ASM_X86_SECTIONS_H */ |
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h index 82c34ee25a65..906794aa034e 100644 --- a/arch/x86/include/asm/sys_ia32.h +++ b/arch/x86/include/asm/sys_ia32.h | |||
@@ -20,31 +20,43 @@ | |||
20 | #include <asm/ia32.h> | 20 | #include <asm/ia32.h> |
21 | 21 | ||
22 | /* ia32/sys_ia32.c */ | 22 | /* ia32/sys_ia32.c */ |
23 | asmlinkage long sys32_truncate64(const char __user *, unsigned long, unsigned long); | 23 | asmlinkage long compat_sys_x86_truncate64(const char __user *, unsigned long, |
24 | asmlinkage long sys32_ftruncate64(unsigned int, unsigned long, unsigned long); | 24 | unsigned long); |
25 | asmlinkage long compat_sys_x86_ftruncate64(unsigned int, unsigned long, | ||
26 | unsigned long); | ||
25 | 27 | ||
26 | asmlinkage long sys32_stat64(const char __user *, struct stat64 __user *); | 28 | asmlinkage long compat_sys_x86_stat64(const char __user *, |
27 | asmlinkage long sys32_lstat64(const char __user *, struct stat64 __user *); | 29 | struct stat64 __user *); |
28 | asmlinkage long sys32_fstat64(unsigned int, struct stat64 __user *); | 30 | asmlinkage long compat_sys_x86_lstat64(const char __user *, |
29 | asmlinkage long sys32_fstatat(unsigned int, const char __user *, | 31 | struct stat64 __user *); |
32 | asmlinkage long compat_sys_x86_fstat64(unsigned int, struct stat64 __user *); | ||
33 | asmlinkage long compat_sys_x86_fstatat(unsigned int, const char __user *, | ||
30 | struct stat64 __user *, int); | 34 | struct stat64 __user *, int); |
31 | struct mmap_arg_struct32; | 35 | struct mmap_arg_struct32; |
32 | asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *); | 36 | asmlinkage long compat_sys_x86_mmap(struct mmap_arg_struct32 __user *); |
33 | 37 | ||
34 | asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int); | 38 | asmlinkage long compat_sys_x86_waitpid(compat_pid_t, unsigned int __user *, |
39 | int); | ||
35 | 40 | ||
36 | asmlinkage long sys32_pread(unsigned int, char __user *, u32, u32, u32); | 41 | asmlinkage long compat_sys_x86_pread(unsigned int, char __user *, u32, u32, |
37 | asmlinkage long sys32_pwrite(unsigned int, const char __user *, u32, u32, u32); | 42 | u32); |
43 | asmlinkage long compat_sys_x86_pwrite(unsigned int, const char __user *, u32, | ||
44 | u32, u32); | ||
38 | 45 | ||
39 | long sys32_fadvise64_64(int, __u32, __u32, __u32, __u32, int); | 46 | asmlinkage long compat_sys_x86_fadvise64_64(int, __u32, __u32, __u32, __u32, |
40 | long sys32_vm86_warning(void); | 47 | int); |
41 | 48 | ||
42 | asmlinkage ssize_t sys32_readahead(int, unsigned, unsigned, size_t); | 49 | asmlinkage ssize_t compat_sys_x86_readahead(int, unsigned int, unsigned int, |
43 | asmlinkage long sys32_sync_file_range(int, unsigned, unsigned, | 50 | size_t); |
44 | unsigned, unsigned, int); | 51 | asmlinkage long compat_sys_x86_sync_file_range(int, unsigned int, unsigned int, |
45 | asmlinkage long sys32_fadvise64(int, unsigned, unsigned, size_t, int); | 52 | unsigned int, unsigned int, |
46 | asmlinkage long sys32_fallocate(int, int, unsigned, | 53 | int); |
47 | unsigned, unsigned, unsigned); | 54 | asmlinkage long compat_sys_x86_fadvise64(int, unsigned int, unsigned int, |
55 | size_t, int); | ||
56 | asmlinkage long compat_sys_x86_fallocate(int, int, unsigned int, unsigned int, | ||
57 | unsigned int, unsigned int); | ||
58 | asmlinkage long compat_sys_x86_clone(unsigned long, unsigned long, int __user *, | ||
59 | unsigned long, int __user *); | ||
48 | 60 | ||
49 | /* ia32/ia32_signal.c */ | 61 | /* ia32/ia32_signal.c */ |
50 | asmlinkage long sys32_sigreturn(void); | 62 | asmlinkage long sys32_sigreturn(void); |
diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h index 7a2ade4aa235..6cfa9c8cb7d6 100644 --- a/arch/x86/include/uapi/asm/kvm_para.h +++ b/arch/x86/include/uapi/asm/kvm_para.h | |||
@@ -26,6 +26,7 @@ | |||
26 | #define KVM_FEATURE_PV_EOI 6 | 26 | #define KVM_FEATURE_PV_EOI 6 |
27 | #define KVM_FEATURE_PV_UNHALT 7 | 27 | #define KVM_FEATURE_PV_UNHALT 7 |
28 | #define KVM_FEATURE_PV_TLB_FLUSH 9 | 28 | #define KVM_FEATURE_PV_TLB_FLUSH 9 |
29 | #define KVM_FEATURE_ASYNC_PF_VMEXIT 10 | ||
29 | 30 | ||
30 | /* The last 8 bits are used to indicate how to interpret the flags field | 31 | /* The last 8 bits are used to indicate how to interpret the flags field |
31 | * in pvclock structure. If no bits are set, all flags are ignored. | 32 | * in pvclock structure. If no bits are set, all flags are ignored. |
diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h index 91723461dc1f..435db58a7bad 100644 --- a/arch/x86/include/uapi/asm/mce.h +++ b/arch/x86/include/uapi/asm/mce.h | |||
@@ -30,6 +30,7 @@ struct mce { | |||
30 | __u64 synd; /* MCA_SYND MSR: only valid on SMCA systems */ | 30 | __u64 synd; /* MCA_SYND MSR: only valid on SMCA systems */ |
31 | __u64 ipid; /* MCA_IPID MSR: only valid on SMCA systems */ | 31 | __u64 ipid; /* MCA_IPID MSR: only valid on SMCA systems */ |
32 | __u64 ppin; /* Protected Processor Inventory Number */ | 32 | __u64 ppin; /* Protected Processor Inventory Number */ |
33 | __u32 microcode;/* Microcode revision */ | ||
33 | }; | 34 | }; |
34 | 35 | ||
35 | #define MCE_GET_RECORD_LEN _IOR('M', 1, int) | 36 | #define MCE_GET_RECORD_LEN _IOR('M', 1, int) |
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 8ad2e410974f..7c5538769f7e 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
@@ -1603,7 +1603,7 @@ static void __init delay_with_tsc(void) | |||
1603 | do { | 1603 | do { |
1604 | rep_nop(); | 1604 | rep_nop(); |
1605 | now = rdtsc(); | 1605 | now = rdtsc(); |
1606 | } while ((now - start) < 40000000000UL / HZ && | 1606 | } while ((now - start) < 40000000000ULL / HZ && |
1607 | time_before_eq(jiffies, end)); | 1607 | time_before_eq(jiffies, end)); |
1608 | } | 1608 | } |
1609 | 1609 | ||
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index d71c8b54b696..bfca937bdcc3 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c | |||
@@ -300,6 +300,15 @@ retpoline_auto: | |||
300 | setup_force_cpu_cap(X86_FEATURE_USE_IBPB); | 300 | setup_force_cpu_cap(X86_FEATURE_USE_IBPB); |
301 | pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n"); | 301 | pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n"); |
302 | } | 302 | } |
303 | |||
304 | /* | ||
305 | * Retpoline means the kernel is safe because it has no indirect | ||
306 | * branches. But firmware isn't, so use IBRS to protect that. | ||
307 | */ | ||
308 | if (boot_cpu_has(X86_FEATURE_IBRS)) { | ||
309 | setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW); | ||
310 | pr_info("Enabling Restricted Speculation for firmware calls\n"); | ||
311 | } | ||
303 | } | 312 | } |
304 | 313 | ||
305 | #undef pr_fmt | 314 | #undef pr_fmt |
@@ -326,8 +335,9 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c | |||
326 | if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) | 335 | if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) |
327 | return sprintf(buf, "Not affected\n"); | 336 | return sprintf(buf, "Not affected\n"); |
328 | 337 | ||
329 | return sprintf(buf, "%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], | 338 | return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], |
330 | boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "", | 339 | boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "", |
340 | boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", | ||
331 | spectre_v2_module_string()); | 341 | spectre_v2_module_string()); |
332 | } | 342 | } |
333 | #endif | 343 | #endif |
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 824aee0117bb..348cf4821240 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -1749,3 +1749,33 @@ static int __init init_cpu_syscore(void) | |||
1749 | return 0; | 1749 | return 0; |
1750 | } | 1750 | } |
1751 | core_initcall(init_cpu_syscore); | 1751 | core_initcall(init_cpu_syscore); |
1752 | |||
1753 | /* | ||
1754 | * The microcode loader calls this upon late microcode load to recheck features, | ||
1755 | * only when microcode has been updated. Caller holds microcode_mutex and CPU | ||
1756 | * hotplug lock. | ||
1757 | */ | ||
1758 | void microcode_check(void) | ||
1759 | { | ||
1760 | struct cpuinfo_x86 info; | ||
1761 | |||
1762 | perf_check_microcode(); | ||
1763 | |||
1764 | /* Reload CPUID max function as it might've changed. */ | ||
1765 | info.cpuid_level = cpuid_eax(0); | ||
1766 | |||
1767 | /* | ||
1768 | * Copy all capability leafs to pick up the synthetic ones so that | ||
1769 | * memcmp() below doesn't fail on that. The ones coming from CPUID will | ||
1770 | * get overwritten in get_cpu_cap(). | ||
1771 | */ | ||
1772 | memcpy(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability)); | ||
1773 | |||
1774 | get_cpu_cap(&info); | ||
1775 | |||
1776 | if (!memcmp(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability))) | ||
1777 | return; | ||
1778 | |||
1779 | pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n"); | ||
1780 | pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n"); | ||
1781 | } | ||
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index d19e903214b4..c3af167d0a70 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
@@ -105,7 +105,7 @@ static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c) | |||
105 | /* | 105 | /* |
106 | * Early microcode releases for the Spectre v2 mitigation were broken. | 106 | * Early microcode releases for the Spectre v2 mitigation were broken. |
107 | * Information taken from; | 107 | * Information taken from; |
108 | * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/01/microcode-update-guidance.pdf | 108 | * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf |
109 | * - https://kb.vmware.com/s/article/52345 | 109 | * - https://kb.vmware.com/s/article/52345 |
110 | * - Microcode revisions observed in the wild | 110 | * - Microcode revisions observed in the wild |
111 | * - Release note from 20180108 microcode release | 111 | * - Release note from 20180108 microcode release |
@@ -123,7 +123,6 @@ static const struct sku_microcode spectre_bad_microcodes[] = { | |||
123 | { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x80 }, | 123 | { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x80 }, |
124 | { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e }, | 124 | { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e }, |
125 | { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c }, | 125 | { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c }, |
126 | { INTEL_FAM6_SKYLAKE_DESKTOP, 0x03, 0xc2 }, | ||
127 | { INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 }, | 126 | { INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 }, |
128 | { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b }, | 127 | { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b }, |
129 | { INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 }, | 128 | { INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 }, |
@@ -144,6 +143,13 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c) | |||
144 | { | 143 | { |
145 | int i; | 144 | int i; |
146 | 145 | ||
146 | /* | ||
147 | * We know that the hypervisor lie to us on the microcode version so | ||
148 | * we may as well hope that it is running the correct version. | ||
149 | */ | ||
150 | if (cpu_has(c, X86_FEATURE_HYPERVISOR)) | ||
151 | return false; | ||
152 | |||
147 | for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) { | 153 | for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) { |
148 | if (c->x86_model == spectre_bad_microcodes[i].model && | 154 | if (c->x86_model == spectre_bad_microcodes[i].model && |
149 | c->x86_stepping == spectre_bad_microcodes[i].stepping) | 155 | c->x86_stepping == spectre_bad_microcodes[i].stepping) |
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 8ff94d1e2dce..466f47301334 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c | |||
@@ -56,6 +56,9 @@ | |||
56 | 56 | ||
57 | static DEFINE_MUTEX(mce_log_mutex); | 57 | static DEFINE_MUTEX(mce_log_mutex); |
58 | 58 | ||
59 | /* sysfs synchronization */ | ||
60 | static DEFINE_MUTEX(mce_sysfs_mutex); | ||
61 | |||
59 | #define CREATE_TRACE_POINTS | 62 | #define CREATE_TRACE_POINTS |
60 | #include <trace/events/mce.h> | 63 | #include <trace/events/mce.h> |
61 | 64 | ||
@@ -130,6 +133,8 @@ void mce_setup(struct mce *m) | |||
130 | 133 | ||
131 | if (this_cpu_has(X86_FEATURE_INTEL_PPIN)) | 134 | if (this_cpu_has(X86_FEATURE_INTEL_PPIN)) |
132 | rdmsrl(MSR_PPIN, m->ppin); | 135 | rdmsrl(MSR_PPIN, m->ppin); |
136 | |||
137 | m->microcode = boot_cpu_data.microcode; | ||
133 | } | 138 | } |
134 | 139 | ||
135 | DEFINE_PER_CPU(struct mce, injectm); | 140 | DEFINE_PER_CPU(struct mce, injectm); |
@@ -262,7 +267,7 @@ static void __print_mce(struct mce *m) | |||
262 | */ | 267 | */ |
263 | pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n", | 268 | pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n", |
264 | m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid, | 269 | m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid, |
265 | cpu_data(m->extcpu).microcode); | 270 | m->microcode); |
266 | } | 271 | } |
267 | 272 | ||
268 | static void print_mce(struct mce *m) | 273 | static void print_mce(struct mce *m) |
@@ -2086,6 +2091,7 @@ static ssize_t set_ignore_ce(struct device *s, | |||
2086 | if (kstrtou64(buf, 0, &new) < 0) | 2091 | if (kstrtou64(buf, 0, &new) < 0) |
2087 | return -EINVAL; | 2092 | return -EINVAL; |
2088 | 2093 | ||
2094 | mutex_lock(&mce_sysfs_mutex); | ||
2089 | if (mca_cfg.ignore_ce ^ !!new) { | 2095 | if (mca_cfg.ignore_ce ^ !!new) { |
2090 | if (new) { | 2096 | if (new) { |
2091 | /* disable ce features */ | 2097 | /* disable ce features */ |
@@ -2098,6 +2104,8 @@ static ssize_t set_ignore_ce(struct device *s, | |||
2098 | on_each_cpu(mce_enable_ce, (void *)1, 1); | 2104 | on_each_cpu(mce_enable_ce, (void *)1, 1); |
2099 | } | 2105 | } |
2100 | } | 2106 | } |
2107 | mutex_unlock(&mce_sysfs_mutex); | ||
2108 | |||
2101 | return size; | 2109 | return size; |
2102 | } | 2110 | } |
2103 | 2111 | ||
@@ -2110,6 +2118,7 @@ static ssize_t set_cmci_disabled(struct device *s, | |||
2110 | if (kstrtou64(buf, 0, &new) < 0) | 2118 | if (kstrtou64(buf, 0, &new) < 0) |
2111 | return -EINVAL; | 2119 | return -EINVAL; |
2112 | 2120 | ||
2121 | mutex_lock(&mce_sysfs_mutex); | ||
2113 | if (mca_cfg.cmci_disabled ^ !!new) { | 2122 | if (mca_cfg.cmci_disabled ^ !!new) { |
2114 | if (new) { | 2123 | if (new) { |
2115 | /* disable cmci */ | 2124 | /* disable cmci */ |
@@ -2121,6 +2130,8 @@ static ssize_t set_cmci_disabled(struct device *s, | |||
2121 | on_each_cpu(mce_enable_ce, NULL, 1); | 2130 | on_each_cpu(mce_enable_ce, NULL, 1); |
2122 | } | 2131 | } |
2123 | } | 2132 | } |
2133 | mutex_unlock(&mce_sysfs_mutex); | ||
2134 | |||
2124 | return size; | 2135 | return size; |
2125 | } | 2136 | } |
2126 | 2137 | ||
@@ -2128,8 +2139,19 @@ static ssize_t store_int_with_restart(struct device *s, | |||
2128 | struct device_attribute *attr, | 2139 | struct device_attribute *attr, |
2129 | const char *buf, size_t size) | 2140 | const char *buf, size_t size) |
2130 | { | 2141 | { |
2131 | ssize_t ret = device_store_int(s, attr, buf, size); | 2142 | unsigned long old_check_interval = check_interval; |
2143 | ssize_t ret = device_store_ulong(s, attr, buf, size); | ||
2144 | |||
2145 | if (check_interval == old_check_interval) | ||
2146 | return ret; | ||
2147 | |||
2148 | if (check_interval < 1) | ||
2149 | check_interval = 1; | ||
2150 | |||
2151 | mutex_lock(&mce_sysfs_mutex); | ||
2132 | mce_restart(); | 2152 | mce_restart(); |
2153 | mutex_unlock(&mce_sysfs_mutex); | ||
2154 | |||
2133 | return ret; | 2155 | return ret; |
2134 | } | 2156 | } |
2135 | 2157 | ||
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 330b8462d426..48179928ff38 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c | |||
@@ -339,7 +339,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax) | |||
339 | return -EINVAL; | 339 | return -EINVAL; |
340 | 340 | ||
341 | ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size); | 341 | ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size); |
342 | if (ret != UCODE_OK) | 342 | if (ret > UCODE_UPDATED) |
343 | return -EINVAL; | 343 | return -EINVAL; |
344 | 344 | ||
345 | return 0; | 345 | return 0; |
@@ -498,7 +498,7 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size, | |||
498 | return patch_size; | 498 | return patch_size; |
499 | } | 499 | } |
500 | 500 | ||
501 | static int apply_microcode_amd(int cpu) | 501 | static enum ucode_state apply_microcode_amd(int cpu) |
502 | { | 502 | { |
503 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 503 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
504 | struct microcode_amd *mc_amd; | 504 | struct microcode_amd *mc_amd; |
@@ -512,7 +512,7 @@ static int apply_microcode_amd(int cpu) | |||
512 | 512 | ||
513 | p = find_patch(cpu); | 513 | p = find_patch(cpu); |
514 | if (!p) | 514 | if (!p) |
515 | return 0; | 515 | return UCODE_NFOUND; |
516 | 516 | ||
517 | mc_amd = p->data; | 517 | mc_amd = p->data; |
518 | uci->mc = p->data; | 518 | uci->mc = p->data; |
@@ -523,13 +523,13 @@ static int apply_microcode_amd(int cpu) | |||
523 | if (rev >= mc_amd->hdr.patch_id) { | 523 | if (rev >= mc_amd->hdr.patch_id) { |
524 | c->microcode = rev; | 524 | c->microcode = rev; |
525 | uci->cpu_sig.rev = rev; | 525 | uci->cpu_sig.rev = rev; |
526 | return 0; | 526 | return UCODE_OK; |
527 | } | 527 | } |
528 | 528 | ||
529 | if (__apply_microcode_amd(mc_amd)) { | 529 | if (__apply_microcode_amd(mc_amd)) { |
530 | pr_err("CPU%d: update failed for patch_level=0x%08x\n", | 530 | pr_err("CPU%d: update failed for patch_level=0x%08x\n", |
531 | cpu, mc_amd->hdr.patch_id); | 531 | cpu, mc_amd->hdr.patch_id); |
532 | return -1; | 532 | return UCODE_ERROR; |
533 | } | 533 | } |
534 | pr_info("CPU%d: new patch_level=0x%08x\n", cpu, | 534 | pr_info("CPU%d: new patch_level=0x%08x\n", cpu, |
535 | mc_amd->hdr.patch_id); | 535 | mc_amd->hdr.patch_id); |
@@ -537,7 +537,7 @@ static int apply_microcode_amd(int cpu) | |||
537 | uci->cpu_sig.rev = mc_amd->hdr.patch_id; | 537 | uci->cpu_sig.rev = mc_amd->hdr.patch_id; |
538 | c->microcode = mc_amd->hdr.patch_id; | 538 | c->microcode = mc_amd->hdr.patch_id; |
539 | 539 | ||
540 | return 0; | 540 | return UCODE_UPDATED; |
541 | } | 541 | } |
542 | 542 | ||
543 | static int install_equiv_cpu_table(const u8 *buf) | 543 | static int install_equiv_cpu_table(const u8 *buf) |
@@ -683,27 +683,35 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, | |||
683 | static enum ucode_state | 683 | static enum ucode_state |
684 | load_microcode_amd(bool save, u8 family, const u8 *data, size_t size) | 684 | load_microcode_amd(bool save, u8 family, const u8 *data, size_t size) |
685 | { | 685 | { |
686 | struct ucode_patch *p; | ||
686 | enum ucode_state ret; | 687 | enum ucode_state ret; |
687 | 688 | ||
688 | /* free old equiv table */ | 689 | /* free old equiv table */ |
689 | free_equiv_cpu_table(); | 690 | free_equiv_cpu_table(); |
690 | 691 | ||
691 | ret = __load_microcode_amd(family, data, size); | 692 | ret = __load_microcode_amd(family, data, size); |
692 | 693 | if (ret != UCODE_OK) { | |
693 | if (ret != UCODE_OK) | ||
694 | cleanup(); | 694 | cleanup(); |
695 | return ret; | ||
696 | } | ||
695 | 697 | ||
696 | #ifdef CONFIG_X86_32 | 698 | p = find_patch(0); |
697 | /* save BSP's matching patch for early load */ | 699 | if (!p) { |
698 | if (save) { | 700 | return ret; |
699 | struct ucode_patch *p = find_patch(0); | 701 | } else { |
700 | if (p) { | 702 | if (boot_cpu_data.microcode == p->patch_id) |
701 | memset(amd_ucode_patch, 0, PATCH_MAX_SIZE); | 703 | return ret; |
702 | memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), | 704 | |
703 | PATCH_MAX_SIZE)); | 705 | ret = UCODE_NEW; |
704 | } | ||
705 | } | 706 | } |
706 | #endif | 707 | |
708 | /* save BSP's matching patch for early load */ | ||
709 | if (!save) | ||
710 | return ret; | ||
711 | |||
712 | memset(amd_ucode_patch, 0, PATCH_MAX_SIZE); | ||
713 | memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), PATCH_MAX_SIZE)); | ||
714 | |||
707 | return ret; | 715 | return ret; |
708 | } | 716 | } |
709 | 717 | ||
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 319dd65f98a2..10c4fc2c91f8 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c | |||
@@ -22,13 +22,16 @@ | |||
22 | #define pr_fmt(fmt) "microcode: " fmt | 22 | #define pr_fmt(fmt) "microcode: " fmt |
23 | 23 | ||
24 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
25 | #include <linux/stop_machine.h> | ||
25 | #include <linux/syscore_ops.h> | 26 | #include <linux/syscore_ops.h> |
26 | #include <linux/miscdevice.h> | 27 | #include <linux/miscdevice.h> |
27 | #include <linux/capability.h> | 28 | #include <linux/capability.h> |
28 | #include <linux/firmware.h> | 29 | #include <linux/firmware.h> |
29 | #include <linux/kernel.h> | 30 | #include <linux/kernel.h> |
31 | #include <linux/delay.h> | ||
30 | #include <linux/mutex.h> | 32 | #include <linux/mutex.h> |
31 | #include <linux/cpu.h> | 33 | #include <linux/cpu.h> |
34 | #include <linux/nmi.h> | ||
32 | #include <linux/fs.h> | 35 | #include <linux/fs.h> |
33 | #include <linux/mm.h> | 36 | #include <linux/mm.h> |
34 | 37 | ||
@@ -64,6 +67,11 @@ LIST_HEAD(microcode_cache); | |||
64 | */ | 67 | */ |
65 | static DEFINE_MUTEX(microcode_mutex); | 68 | static DEFINE_MUTEX(microcode_mutex); |
66 | 69 | ||
70 | /* | ||
71 | * Serialize late loading so that CPUs get updated one-by-one. | ||
72 | */ | ||
73 | static DEFINE_SPINLOCK(update_lock); | ||
74 | |||
67 | struct ucode_cpu_info ucode_cpu_info[NR_CPUS]; | 75 | struct ucode_cpu_info ucode_cpu_info[NR_CPUS]; |
68 | 76 | ||
69 | struct cpu_info_ctx { | 77 | struct cpu_info_ctx { |
@@ -373,26 +381,23 @@ static int collect_cpu_info(int cpu) | |||
373 | return ret; | 381 | return ret; |
374 | } | 382 | } |
375 | 383 | ||
376 | struct apply_microcode_ctx { | ||
377 | int err; | ||
378 | }; | ||
379 | |||
380 | static void apply_microcode_local(void *arg) | 384 | static void apply_microcode_local(void *arg) |
381 | { | 385 | { |
382 | struct apply_microcode_ctx *ctx = arg; | 386 | enum ucode_state *err = arg; |
383 | 387 | ||
384 | ctx->err = microcode_ops->apply_microcode(smp_processor_id()); | 388 | *err = microcode_ops->apply_microcode(smp_processor_id()); |
385 | } | 389 | } |
386 | 390 | ||
387 | static int apply_microcode_on_target(int cpu) | 391 | static int apply_microcode_on_target(int cpu) |
388 | { | 392 | { |
389 | struct apply_microcode_ctx ctx = { .err = 0 }; | 393 | enum ucode_state err; |
390 | int ret; | 394 | int ret; |
391 | 395 | ||
392 | ret = smp_call_function_single(cpu, apply_microcode_local, &ctx, 1); | 396 | ret = smp_call_function_single(cpu, apply_microcode_local, &err, 1); |
393 | if (!ret) | 397 | if (!ret) { |
394 | ret = ctx.err; | 398 | if (err == UCODE_ERROR) |
395 | 399 | ret = 1; | |
400 | } | ||
396 | return ret; | 401 | return ret; |
397 | } | 402 | } |
398 | 403 | ||
@@ -489,31 +494,124 @@ static void __exit microcode_dev_exit(void) | |||
489 | /* fake device for request_firmware */ | 494 | /* fake device for request_firmware */ |
490 | static struct platform_device *microcode_pdev; | 495 | static struct platform_device *microcode_pdev; |
491 | 496 | ||
492 | static int reload_for_cpu(int cpu) | 497 | /* |
498 | * Late loading dance. Why the heavy-handed stomp_machine effort? | ||
499 | * | ||
500 | * - HT siblings must be idle and not execute other code while the other sibling | ||
501 | * is loading microcode in order to avoid any negative interactions caused by | ||
502 | * the loading. | ||
503 | * | ||
504 | * - In addition, microcode update on the cores must be serialized until this | ||
505 | * requirement can be relaxed in the future. Right now, this is conservative | ||
506 | * and good. | ||
507 | */ | ||
508 | #define SPINUNIT 100 /* 100 nsec */ | ||
509 | |||
510 | static int check_online_cpus(void) | ||
493 | { | 511 | { |
494 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; | 512 | if (num_online_cpus() == num_present_cpus()) |
495 | enum ucode_state ustate; | 513 | return 0; |
496 | int err = 0; | ||
497 | 514 | ||
498 | if (!uci->valid) | 515 | pr_err("Not all CPUs online, aborting microcode update.\n"); |
499 | return err; | ||
500 | 516 | ||
501 | ustate = microcode_ops->request_microcode_fw(cpu, µcode_pdev->dev, true); | 517 | return -EINVAL; |
502 | if (ustate == UCODE_OK) | 518 | } |
503 | apply_microcode_on_target(cpu); | 519 | |
504 | else | 520 | static atomic_t late_cpus_in; |
505 | if (ustate == UCODE_ERROR) | 521 | static atomic_t late_cpus_out; |
506 | err = -EINVAL; | 522 | |
507 | return err; | 523 | static int __wait_for_cpus(atomic_t *t, long long timeout) |
524 | { | ||
525 | int all_cpus = num_online_cpus(); | ||
526 | |||
527 | atomic_inc(t); | ||
528 | |||
529 | while (atomic_read(t) < all_cpus) { | ||
530 | if (timeout < SPINUNIT) { | ||
531 | pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n", | ||
532 | all_cpus - atomic_read(t)); | ||
533 | return 1; | ||
534 | } | ||
535 | |||
536 | ndelay(SPINUNIT); | ||
537 | timeout -= SPINUNIT; | ||
538 | |||
539 | touch_nmi_watchdog(); | ||
540 | } | ||
541 | return 0; | ||
542 | } | ||
543 | |||
544 | /* | ||
545 | * Returns: | ||
546 | * < 0 - on error | ||
547 | * 0 - no update done | ||
548 | * 1 - microcode was updated | ||
549 | */ | ||
550 | static int __reload_late(void *info) | ||
551 | { | ||
552 | int cpu = smp_processor_id(); | ||
553 | enum ucode_state err; | ||
554 | int ret = 0; | ||
555 | |||
556 | /* | ||
557 | * Wait for all CPUs to arrive. A load will not be attempted unless all | ||
558 | * CPUs show up. | ||
559 | * */ | ||
560 | if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC)) | ||
561 | return -1; | ||
562 | |||
563 | spin_lock(&update_lock); | ||
564 | apply_microcode_local(&err); | ||
565 | spin_unlock(&update_lock); | ||
566 | |||
567 | if (err > UCODE_NFOUND) { | ||
568 | pr_warn("Error reloading microcode on CPU %d\n", cpu); | ||
569 | return -1; | ||
570 | /* siblings return UCODE_OK because their engine got updated already */ | ||
571 | } else if (err == UCODE_UPDATED || err == UCODE_OK) { | ||
572 | ret = 1; | ||
573 | } else { | ||
574 | return ret; | ||
575 | } | ||
576 | |||
577 | /* | ||
578 | * Increase the wait timeout to a safe value here since we're | ||
579 | * serializing the microcode update and that could take a while on a | ||
580 | * large number of CPUs. And that is fine as the *actual* timeout will | ||
581 | * be determined by the last CPU finished updating and thus cut short. | ||
582 | */ | ||
583 | if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC * num_online_cpus())) | ||
584 | panic("Timeout during microcode update!\n"); | ||
585 | |||
586 | return ret; | ||
587 | } | ||
588 | |||
589 | /* | ||
590 | * Reload microcode late on all CPUs. Wait for a sec until they | ||
591 | * all gather together. | ||
592 | */ | ||
593 | static int microcode_reload_late(void) | ||
594 | { | ||
595 | int ret; | ||
596 | |||
597 | atomic_set(&late_cpus_in, 0); | ||
598 | atomic_set(&late_cpus_out, 0); | ||
599 | |||
600 | ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask); | ||
601 | if (ret > 0) | ||
602 | microcode_check(); | ||
603 | |||
604 | return ret; | ||
508 | } | 605 | } |
509 | 606 | ||
510 | static ssize_t reload_store(struct device *dev, | 607 | static ssize_t reload_store(struct device *dev, |
511 | struct device_attribute *attr, | 608 | struct device_attribute *attr, |
512 | const char *buf, size_t size) | 609 | const char *buf, size_t size) |
513 | { | 610 | { |
611 | enum ucode_state tmp_ret = UCODE_OK; | ||
612 | int bsp = boot_cpu_data.cpu_index; | ||
514 | unsigned long val; | 613 | unsigned long val; |
515 | int cpu; | 614 | ssize_t ret = 0; |
516 | ssize_t ret = 0, tmp_ret; | ||
517 | 615 | ||
518 | ret = kstrtoul(buf, 0, &val); | 616 | ret = kstrtoul(buf, 0, &val); |
519 | if (ret) | 617 | if (ret) |
@@ -522,23 +620,24 @@ static ssize_t reload_store(struct device *dev, | |||
522 | if (val != 1) | 620 | if (val != 1) |
523 | return size; | 621 | return size; |
524 | 622 | ||
623 | tmp_ret = microcode_ops->request_microcode_fw(bsp, µcode_pdev->dev, true); | ||
624 | if (tmp_ret != UCODE_NEW) | ||
625 | return size; | ||
626 | |||
525 | get_online_cpus(); | 627 | get_online_cpus(); |
526 | mutex_lock(µcode_mutex); | ||
527 | for_each_online_cpu(cpu) { | ||
528 | tmp_ret = reload_for_cpu(cpu); | ||
529 | if (tmp_ret != 0) | ||
530 | pr_warn("Error reloading microcode on CPU %d\n", cpu); | ||
531 | 628 | ||
532 | /* save retval of the first encountered reload error */ | 629 | ret = check_online_cpus(); |
533 | if (!ret) | 630 | if (ret) |
534 | ret = tmp_ret; | 631 | goto put; |
535 | } | 632 | |
536 | if (!ret) | 633 | mutex_lock(µcode_mutex); |
537 | perf_check_microcode(); | 634 | ret = microcode_reload_late(); |
538 | mutex_unlock(µcode_mutex); | 635 | mutex_unlock(µcode_mutex); |
636 | |||
637 | put: | ||
539 | put_online_cpus(); | 638 | put_online_cpus(); |
540 | 639 | ||
541 | if (!ret) | 640 | if (ret >= 0) |
542 | ret = size; | 641 | ret = size; |
543 | 642 | ||
544 | return ret; | 643 | return ret; |
@@ -606,10 +705,8 @@ static enum ucode_state microcode_init_cpu(int cpu, bool refresh_fw) | |||
606 | if (system_state != SYSTEM_RUNNING) | 705 | if (system_state != SYSTEM_RUNNING) |
607 | return UCODE_NFOUND; | 706 | return UCODE_NFOUND; |
608 | 707 | ||
609 | ustate = microcode_ops->request_microcode_fw(cpu, µcode_pdev->dev, | 708 | ustate = microcode_ops->request_microcode_fw(cpu, µcode_pdev->dev, refresh_fw); |
610 | refresh_fw); | 709 | if (ustate == UCODE_NEW) { |
611 | |||
612 | if (ustate == UCODE_OK) { | ||
613 | pr_debug("CPU%d updated upon init\n", cpu); | 710 | pr_debug("CPU%d updated upon init\n", cpu); |
614 | apply_microcode_on_target(cpu); | 711 | apply_microcode_on_target(cpu); |
615 | } | 712 | } |
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index a15db2b4e0d6..32b8e5724f96 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c | |||
@@ -589,6 +589,23 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early) | |||
589 | if (!mc) | 589 | if (!mc) |
590 | return 0; | 590 | return 0; |
591 | 591 | ||
592 | /* | ||
593 | * Save us the MSR write below - which is a particular expensive | ||
594 | * operation - when the other hyperthread has updated the microcode | ||
595 | * already. | ||
596 | */ | ||
597 | rev = intel_get_microcode_revision(); | ||
598 | if (rev >= mc->hdr.rev) { | ||
599 | uci->cpu_sig.rev = rev; | ||
600 | return UCODE_OK; | ||
601 | } | ||
602 | |||
603 | /* | ||
604 | * Writeback and invalidate caches before updating microcode to avoid | ||
605 | * internal issues depending on what the microcode is updating. | ||
606 | */ | ||
607 | native_wbinvd(); | ||
608 | |||
592 | /* write microcode via MSR 0x79 */ | 609 | /* write microcode via MSR 0x79 */ |
593 | native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits); | 610 | native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits); |
594 | 611 | ||
@@ -772,27 +789,44 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig) | |||
772 | return 0; | 789 | return 0; |
773 | } | 790 | } |
774 | 791 | ||
775 | static int apply_microcode_intel(int cpu) | 792 | static enum ucode_state apply_microcode_intel(int cpu) |
776 | { | 793 | { |
794 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; | ||
795 | struct cpuinfo_x86 *c = &cpu_data(cpu); | ||
777 | struct microcode_intel *mc; | 796 | struct microcode_intel *mc; |
778 | struct ucode_cpu_info *uci; | ||
779 | struct cpuinfo_x86 *c; | ||
780 | static int prev_rev; | 797 | static int prev_rev; |
781 | u32 rev; | 798 | u32 rev; |
782 | 799 | ||
783 | /* We should bind the task to the CPU */ | 800 | /* We should bind the task to the CPU */ |
784 | if (WARN_ON(raw_smp_processor_id() != cpu)) | 801 | if (WARN_ON(raw_smp_processor_id() != cpu)) |
785 | return -1; | 802 | return UCODE_ERROR; |
786 | 803 | ||
787 | uci = ucode_cpu_info + cpu; | 804 | /* Look for a newer patch in our cache: */ |
788 | mc = uci->mc; | 805 | mc = find_patch(uci); |
789 | if (!mc) { | 806 | if (!mc) { |
790 | /* Look for a newer patch in our cache: */ | 807 | mc = uci->mc; |
791 | mc = find_patch(uci); | ||
792 | if (!mc) | 808 | if (!mc) |
793 | return 0; | 809 | return UCODE_NFOUND; |
794 | } | 810 | } |
795 | 811 | ||
812 | /* | ||
813 | * Save us the MSR write below - which is a particular expensive | ||
814 | * operation - when the other hyperthread has updated the microcode | ||
815 | * already. | ||
816 | */ | ||
817 | rev = intel_get_microcode_revision(); | ||
818 | if (rev >= mc->hdr.rev) { | ||
819 | uci->cpu_sig.rev = rev; | ||
820 | c->microcode = rev; | ||
821 | return UCODE_OK; | ||
822 | } | ||
823 | |||
824 | /* | ||
825 | * Writeback and invalidate caches before updating microcode to avoid | ||
826 | * internal issues depending on what the microcode is updating. | ||
827 | */ | ||
828 | native_wbinvd(); | ||
829 | |||
796 | /* write microcode via MSR 0x79 */ | 830 | /* write microcode via MSR 0x79 */ |
797 | wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits); | 831 | wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits); |
798 | 832 | ||
@@ -801,7 +835,7 @@ static int apply_microcode_intel(int cpu) | |||
801 | if (rev != mc->hdr.rev) { | 835 | if (rev != mc->hdr.rev) { |
802 | pr_err("CPU%d update to revision 0x%x failed\n", | 836 | pr_err("CPU%d update to revision 0x%x failed\n", |
803 | cpu, mc->hdr.rev); | 837 | cpu, mc->hdr.rev); |
804 | return -1; | 838 | return UCODE_ERROR; |
805 | } | 839 | } |
806 | 840 | ||
807 | if (rev != prev_rev) { | 841 | if (rev != prev_rev) { |
@@ -813,12 +847,10 @@ static int apply_microcode_intel(int cpu) | |||
813 | prev_rev = rev; | 847 | prev_rev = rev; |
814 | } | 848 | } |
815 | 849 | ||
816 | c = &cpu_data(cpu); | ||
817 | |||
818 | uci->cpu_sig.rev = rev; | 850 | uci->cpu_sig.rev = rev; |
819 | c->microcode = rev; | 851 | c->microcode = rev; |
820 | 852 | ||
821 | return 0; | 853 | return UCODE_UPDATED; |
822 | } | 854 | } |
823 | 855 | ||
824 | static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, | 856 | static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, |
@@ -830,6 +862,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, | |||
830 | unsigned int leftover = size; | 862 | unsigned int leftover = size; |
831 | unsigned int curr_mc_size = 0, new_mc_size = 0; | 863 | unsigned int curr_mc_size = 0, new_mc_size = 0; |
832 | unsigned int csig, cpf; | 864 | unsigned int csig, cpf; |
865 | enum ucode_state ret = UCODE_OK; | ||
833 | 866 | ||
834 | while (leftover) { | 867 | while (leftover) { |
835 | struct microcode_header_intel mc_header; | 868 | struct microcode_header_intel mc_header; |
@@ -871,6 +904,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, | |||
871 | new_mc = mc; | 904 | new_mc = mc; |
872 | new_mc_size = mc_size; | 905 | new_mc_size = mc_size; |
873 | mc = NULL; /* trigger new vmalloc */ | 906 | mc = NULL; /* trigger new vmalloc */ |
907 | ret = UCODE_NEW; | ||
874 | } | 908 | } |
875 | 909 | ||
876 | ucode_ptr += mc_size; | 910 | ucode_ptr += mc_size; |
@@ -900,7 +934,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, | |||
900 | pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n", | 934 | pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n", |
901 | cpu, new_rev, uci->cpu_sig.rev); | 935 | cpu, new_rev, uci->cpu_sig.rev); |
902 | 936 | ||
903 | return UCODE_OK; | 937 | return ret; |
904 | } | 938 | } |
905 | 939 | ||
906 | static int get_ucode_fw(void *to, const void *from, size_t n) | 940 | static int get_ucode_fw(void *to, const void *from, size_t n) |
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 04a625f0fcda..0f545b3cf926 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <asm/nops.h> | 23 | #include <asm/nops.h> |
24 | #include "../entry/calling.h" | 24 | #include "../entry/calling.h" |
25 | #include <asm/export.h> | 25 | #include <asm/export.h> |
26 | #include <asm/nospec-branch.h> | ||
26 | 27 | ||
27 | #ifdef CONFIG_PARAVIRT | 28 | #ifdef CONFIG_PARAVIRT |
28 | #include <asm/asm-offsets.h> | 29 | #include <asm/asm-offsets.h> |
@@ -134,6 +135,7 @@ ENTRY(secondary_startup_64) | |||
134 | 135 | ||
135 | /* Ensure I am executing from virtual addresses */ | 136 | /* Ensure I am executing from virtual addresses */ |
136 | movq $1f, %rax | 137 | movq $1f, %rax |
138 | ANNOTATE_RETPOLINE_SAFE | ||
137 | jmp *%rax | 139 | jmp *%rax |
138 | 1: | 140 | 1: |
139 | UNWIND_HINT_EMPTY | 141 | UNWIND_HINT_EMPTY |
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c index 2f723301eb58..38deafebb21b 100644 --- a/arch/x86/kernel/ioport.c +++ b/arch/x86/kernel/ioport.c | |||
@@ -23,7 +23,7 @@ | |||
23 | /* | 23 | /* |
24 | * this changes the io permissions bitmap in the current task. | 24 | * this changes the io permissions bitmap in the current task. |
25 | */ | 25 | */ |
26 | asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on) | 26 | SYSCALL_DEFINE3(ioperm, unsigned long, from, unsigned long, num, int, turn_on) |
27 | { | 27 | { |
28 | struct thread_struct *t = ¤t->thread; | 28 | struct thread_struct *t = ¤t->thread; |
29 | struct tss_struct *tss; | 29 | struct tss_struct *tss; |
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index bd36f3c33cd0..0715f827607c 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c | |||
@@ -1168,10 +1168,18 @@ NOKPROBE_SYMBOL(longjmp_break_handler); | |||
1168 | 1168 | ||
1169 | bool arch_within_kprobe_blacklist(unsigned long addr) | 1169 | bool arch_within_kprobe_blacklist(unsigned long addr) |
1170 | { | 1170 | { |
1171 | bool is_in_entry_trampoline_section = false; | ||
1172 | |||
1173 | #ifdef CONFIG_X86_64 | ||
1174 | is_in_entry_trampoline_section = | ||
1175 | (addr >= (unsigned long)__entry_trampoline_start && | ||
1176 | addr < (unsigned long)__entry_trampoline_end); | ||
1177 | #endif | ||
1171 | return (addr >= (unsigned long)__kprobes_text_start && | 1178 | return (addr >= (unsigned long)__kprobes_text_start && |
1172 | addr < (unsigned long)__kprobes_text_end) || | 1179 | addr < (unsigned long)__kprobes_text_end) || |
1173 | (addr >= (unsigned long)__entry_text_start && | 1180 | (addr >= (unsigned long)__entry_text_start && |
1174 | addr < (unsigned long)__entry_text_end); | 1181 | addr < (unsigned long)__entry_text_end) || |
1182 | is_in_entry_trampoline_section; | ||
1175 | } | 1183 | } |
1176 | 1184 | ||
1177 | int __init arch_init_kprobes(void) | 1185 | int __init arch_init_kprobes(void) |
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 4e37d1a851a6..bc1a27280c4b 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c | |||
@@ -49,7 +49,7 @@ | |||
49 | 49 | ||
50 | static int kvmapf = 1; | 50 | static int kvmapf = 1; |
51 | 51 | ||
52 | static int parse_no_kvmapf(char *arg) | 52 | static int __init parse_no_kvmapf(char *arg) |
53 | { | 53 | { |
54 | kvmapf = 0; | 54 | kvmapf = 0; |
55 | return 0; | 55 | return 0; |
@@ -58,7 +58,7 @@ static int parse_no_kvmapf(char *arg) | |||
58 | early_param("no-kvmapf", parse_no_kvmapf); | 58 | early_param("no-kvmapf", parse_no_kvmapf); |
59 | 59 | ||
60 | static int steal_acc = 1; | 60 | static int steal_acc = 1; |
61 | static int parse_no_stealacc(char *arg) | 61 | static int __init parse_no_stealacc(char *arg) |
62 | { | 62 | { |
63 | steal_acc = 0; | 63 | steal_acc = 0; |
64 | return 0; | 64 | return 0; |
@@ -67,7 +67,7 @@ static int parse_no_stealacc(char *arg) | |||
67 | early_param("no-steal-acc", parse_no_stealacc); | 67 | early_param("no-steal-acc", parse_no_stealacc); |
68 | 68 | ||
69 | static int kvmclock_vsyscall = 1; | 69 | static int kvmclock_vsyscall = 1; |
70 | static int parse_no_kvmclock_vsyscall(char *arg) | 70 | static int __init parse_no_kvmclock_vsyscall(char *arg) |
71 | { | 71 | { |
72 | kvmclock_vsyscall = 0; | 72 | kvmclock_vsyscall = 0; |
73 | return 0; | 73 | return 0; |
@@ -341,10 +341,10 @@ static void kvm_guest_cpu_init(void) | |||
341 | #endif | 341 | #endif |
342 | pa |= KVM_ASYNC_PF_ENABLED; | 342 | pa |= KVM_ASYNC_PF_ENABLED; |
343 | 343 | ||
344 | /* Async page fault support for L1 hypervisor is optional */ | 344 | if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT)) |
345 | if (wrmsr_safe(MSR_KVM_ASYNC_PF_EN, | 345 | pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT; |
346 | (pa | KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT) & 0xffffffff, pa >> 32) < 0) | 346 | |
347 | wrmsrl(MSR_KVM_ASYNC_PF_EN, pa); | 347 | wrmsrl(MSR_KVM_ASYNC_PF_EN, pa); |
348 | __this_cpu_write(apf_reason.enabled, 1); | 348 | __this_cpu_write(apf_reason.enabled, 1); |
349 | printk(KERN_INFO"KVM setup async PF for cpu %d\n", | 349 | printk(KERN_INFO"KVM setup async PF for cpu %d\n", |
350 | smp_processor_id()); | 350 | smp_processor_id()); |
@@ -545,7 +545,8 @@ static void __init kvm_guest_init(void) | |||
545 | pv_time_ops.steal_clock = kvm_steal_clock; | 545 | pv_time_ops.steal_clock = kvm_steal_clock; |
546 | } | 546 | } |
547 | 547 | ||
548 | if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH)) | 548 | if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) && |
549 | !kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) | ||
549 | pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others; | 550 | pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others; |
550 | 551 | ||
551 | if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) | 552 | if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) |
@@ -633,7 +634,8 @@ static __init int kvm_setup_pv_tlb_flush(void) | |||
633 | { | 634 | { |
634 | int cpu; | 635 | int cpu; |
635 | 636 | ||
636 | if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH)) { | 637 | if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) && |
638 | !kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) { | ||
637 | for_each_possible_cpu(cpu) { | 639 | for_each_possible_cpu(cpu) { |
638 | zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu), | 640 | zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu), |
639 | GFP_KERNEL, cpu_to_node(cpu)); | 641 | GFP_KERNEL, cpu_to_node(cpu)); |
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 1ae67e982af7..4c616be28506 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -1204,20 +1204,13 @@ void __init setup_arch(char **cmdline_p) | |||
1204 | 1204 | ||
1205 | kasan_init(); | 1205 | kasan_init(); |
1206 | 1206 | ||
1207 | #ifdef CONFIG_X86_32 | ||
1208 | /* sync back kernel address range */ | ||
1209 | clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY, | ||
1210 | swapper_pg_dir + KERNEL_PGD_BOUNDARY, | ||
1211 | KERNEL_PGD_PTRS); | ||
1212 | |||
1213 | /* | 1207 | /* |
1214 | * sync back low identity map too. It is used for example | 1208 | * Sync back kernel address range. |
1215 | * in the 32-bit EFI stub. | 1209 | * |
1210 | * FIXME: Can the later sync in setup_cpu_entry_areas() replace | ||
1211 | * this call? | ||
1216 | */ | 1212 | */ |
1217 | clone_pgd_range(initial_page_table, | 1213 | sync_initial_page_table(); |
1218 | swapper_pg_dir + KERNEL_PGD_BOUNDARY, | ||
1219 | min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY)); | ||
1220 | #endif | ||
1221 | 1214 | ||
1222 | tboot_probe(); | 1215 | tboot_probe(); |
1223 | 1216 | ||
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 497aa766fab3..ea554f812ee1 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c | |||
@@ -287,24 +287,15 @@ void __init setup_per_cpu_areas(void) | |||
287 | /* Setup cpu initialized, callin, callout masks */ | 287 | /* Setup cpu initialized, callin, callout masks */ |
288 | setup_cpu_local_masks(); | 288 | setup_cpu_local_masks(); |
289 | 289 | ||
290 | #ifdef CONFIG_X86_32 | ||
291 | /* | 290 | /* |
292 | * Sync back kernel address range again. We already did this in | 291 | * Sync back kernel address range again. We already did this in |
293 | * setup_arch(), but percpu data also needs to be available in | 292 | * setup_arch(), but percpu data also needs to be available in |
294 | * the smpboot asm. We can't reliably pick up percpu mappings | 293 | * the smpboot asm. We can't reliably pick up percpu mappings |
295 | * using vmalloc_fault(), because exception dispatch needs | 294 | * using vmalloc_fault(), because exception dispatch needs |
296 | * percpu data. | 295 | * percpu data. |
296 | * | ||
297 | * FIXME: Can the later sync in setup_cpu_entry_areas() replace | ||
298 | * this call? | ||
297 | */ | 299 | */ |
298 | clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY, | 300 | sync_initial_page_table(); |
299 | swapper_pg_dir + KERNEL_PGD_BOUNDARY, | ||
300 | KERNEL_PGD_PTRS); | ||
301 | |||
302 | /* | ||
303 | * sync back low identity map too. It is used for example | ||
304 | * in the 32-bit EFI stub. | ||
305 | */ | ||
306 | clone_pgd_range(initial_page_table, | ||
307 | swapper_pg_dir + KERNEL_PGD_BOUNDARY, | ||
308 | min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY)); | ||
309 | #endif | ||
310 | } | 301 | } |
diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c index ac057f9b0763..0d930d8987cc 100644 --- a/arch/x86/kernel/signal_compat.c +++ b/arch/x86/kernel/signal_compat.c | |||
@@ -43,6 +43,13 @@ static inline void signal_compat_build_tests(void) | |||
43 | BUILD_BUG_ON(offsetof(compat_siginfo_t, _sifields) != 3 * sizeof(int)); | 43 | BUILD_BUG_ON(offsetof(compat_siginfo_t, _sifields) != 3 * sizeof(int)); |
44 | #define CHECK_CSI_OFFSET(name) BUILD_BUG_ON(_sifields_offset != offsetof(compat_siginfo_t, _sifields.name)) | 44 | #define CHECK_CSI_OFFSET(name) BUILD_BUG_ON(_sifields_offset != offsetof(compat_siginfo_t, _sifields.name)) |
45 | 45 | ||
46 | BUILD_BUG_ON(offsetof(siginfo_t, si_signo) != 0); | ||
47 | BUILD_BUG_ON(offsetof(siginfo_t, si_errno) != 4); | ||
48 | BUILD_BUG_ON(offsetof(siginfo_t, si_code) != 8); | ||
49 | |||
50 | BUILD_BUG_ON(offsetof(compat_siginfo_t, si_signo) != 0); | ||
51 | BUILD_BUG_ON(offsetof(compat_siginfo_t, si_errno) != 4); | ||
52 | BUILD_BUG_ON(offsetof(compat_siginfo_t, si_code) != 8); | ||
46 | /* | 53 | /* |
47 | * Ensure that the size of each si_field never changes. | 54 | * Ensure that the size of each si_field never changes. |
48 | * If it does, it is a sign that the | 55 | * If it does, it is a sign that the |
@@ -63,36 +70,94 @@ static inline void signal_compat_build_tests(void) | |||
63 | CHECK_CSI_SIZE (_kill, 2*sizeof(int)); | 70 | CHECK_CSI_SIZE (_kill, 2*sizeof(int)); |
64 | CHECK_SI_SIZE (_kill, 2*sizeof(int)); | 71 | CHECK_SI_SIZE (_kill, 2*sizeof(int)); |
65 | 72 | ||
73 | BUILD_BUG_ON(offsetof(siginfo_t, si_pid) != 0x10); | ||
74 | BUILD_BUG_ON(offsetof(siginfo_t, si_uid) != 0x14); | ||
75 | BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pid) != 0xC); | ||
76 | BUILD_BUG_ON(offsetof(compat_siginfo_t, si_uid) != 0x10); | ||
77 | |||
66 | CHECK_CSI_OFFSET(_timer); | 78 | CHECK_CSI_OFFSET(_timer); |
67 | CHECK_CSI_SIZE (_timer, 3*sizeof(int)); | 79 | CHECK_CSI_SIZE (_timer, 3*sizeof(int)); |
68 | CHECK_SI_SIZE (_timer, 6*sizeof(int)); | 80 | CHECK_SI_SIZE (_timer, 6*sizeof(int)); |
69 | 81 | ||
82 | BUILD_BUG_ON(offsetof(siginfo_t, si_tid) != 0x10); | ||
83 | BUILD_BUG_ON(offsetof(siginfo_t, si_overrun) != 0x14); | ||
84 | BUILD_BUG_ON(offsetof(siginfo_t, si_value) != 0x18); | ||
85 | BUILD_BUG_ON(offsetof(compat_siginfo_t, si_tid) != 0x0C); | ||
86 | BUILD_BUG_ON(offsetof(compat_siginfo_t, si_overrun) != 0x10); | ||
87 | BUILD_BUG_ON(offsetof(compat_siginfo_t, si_value) != 0x14); | ||
88 | |||
70 | CHECK_CSI_OFFSET(_rt); | 89 | CHECK_CSI_OFFSET(_rt); |
71 | CHECK_CSI_SIZE (_rt, 3*sizeof(int)); | 90 | CHECK_CSI_SIZE (_rt, 3*sizeof(int)); |
72 | CHECK_SI_SIZE (_rt, 4*sizeof(int)); | 91 | CHECK_SI_SIZE (_rt, 4*sizeof(int)); |
73 | 92 | ||
93 | BUILD_BUG_ON(offsetof(siginfo_t, si_pid) != 0x10); | ||
94 | BUILD_BUG_ON(offsetof(siginfo_t, si_uid) != 0x14); | ||
95 | BUILD_BUG_ON(offsetof(siginfo_t, si_value) != 0x18); | ||
96 | BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pid) != 0x0C); | ||
97 | BUILD_BUG_ON(offsetof(compat_siginfo_t, si_uid) != 0x10); | ||
98 | BUILD_BUG_ON(offsetof(compat_siginfo_t, si_value) != 0x14); | ||
99 | |||
74 | CHECK_CSI_OFFSET(_sigchld); | 100 | CHECK_CSI_OFFSET(_sigchld); |
75 | CHECK_CSI_SIZE (_sigchld, 5*sizeof(int)); | 101 | CHECK_CSI_SIZE (_sigchld, 5*sizeof(int)); |
76 | CHECK_SI_SIZE (_sigchld, 8*sizeof(int)); | 102 | CHECK_SI_SIZE (_sigchld, 8*sizeof(int)); |
77 | 103 | ||
104 | BUILD_BUG_ON(offsetof(siginfo_t, si_pid) != 0x10); | ||
105 | BUILD_BUG_ON(offsetof(siginfo_t, si_uid) != 0x14); | ||
106 | BUILD_BUG_ON(offsetof(siginfo_t, si_status) != 0x18); | ||
107 | BUILD_BUG_ON(offsetof(siginfo_t, si_utime) != 0x20); | ||
108 | BUILD_BUG_ON(offsetof(siginfo_t, si_stime) != 0x28); | ||
109 | BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pid) != 0x0C); | ||
110 | BUILD_BUG_ON(offsetof(compat_siginfo_t, si_uid) != 0x10); | ||
111 | BUILD_BUG_ON(offsetof(compat_siginfo_t, si_status) != 0x14); | ||
112 | BUILD_BUG_ON(offsetof(compat_siginfo_t, si_utime) != 0x18); | ||
113 | BUILD_BUG_ON(offsetof(compat_siginfo_t, si_stime) != 0x1C); | ||
114 | |||
78 | #ifdef CONFIG_X86_X32_ABI | 115 | #ifdef CONFIG_X86_X32_ABI |
79 | CHECK_CSI_OFFSET(_sigchld_x32); | 116 | CHECK_CSI_OFFSET(_sigchld_x32); |
80 | CHECK_CSI_SIZE (_sigchld_x32, 7*sizeof(int)); | 117 | CHECK_CSI_SIZE (_sigchld_x32, 7*sizeof(int)); |
81 | /* no _sigchld_x32 in the generic siginfo_t */ | 118 | /* no _sigchld_x32 in the generic siginfo_t */ |
119 | BUILD_BUG_ON(offsetof(compat_siginfo_t, _sifields._sigchld_x32._utime) != 0x18); | ||
120 | BUILD_BUG_ON(offsetof(compat_siginfo_t, _sifields._sigchld_x32._stime) != 0x20); | ||
82 | #endif | 121 | #endif |
83 | 122 | ||
84 | CHECK_CSI_OFFSET(_sigfault); | 123 | CHECK_CSI_OFFSET(_sigfault); |
85 | CHECK_CSI_SIZE (_sigfault, 4*sizeof(int)); | 124 | CHECK_CSI_SIZE (_sigfault, 4*sizeof(int)); |
86 | CHECK_SI_SIZE (_sigfault, 8*sizeof(int)); | 125 | CHECK_SI_SIZE (_sigfault, 8*sizeof(int)); |
87 | 126 | ||
127 | BUILD_BUG_ON(offsetof(siginfo_t, si_addr) != 0x10); | ||
128 | BUILD_BUG_ON(offsetof(compat_siginfo_t, si_addr) != 0x0C); | ||
129 | |||
130 | BUILD_BUG_ON(offsetof(siginfo_t, si_addr_lsb) != 0x18); | ||
131 | BUILD_BUG_ON(offsetof(compat_siginfo_t, si_addr_lsb) != 0x10); | ||
132 | |||
133 | BUILD_BUG_ON(offsetof(siginfo_t, si_lower) != 0x20); | ||
134 | BUILD_BUG_ON(offsetof(siginfo_t, si_upper) != 0x28); | ||
135 | BUILD_BUG_ON(offsetof(compat_siginfo_t, si_lower) != 0x14); | ||
136 | BUILD_BUG_ON(offsetof(compat_siginfo_t, si_upper) != 0x18); | ||
137 | |||
138 | BUILD_BUG_ON(offsetof(siginfo_t, si_pkey) != 0x20); | ||
139 | BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pkey) != 0x14); | ||
140 | |||
88 | CHECK_CSI_OFFSET(_sigpoll); | 141 | CHECK_CSI_OFFSET(_sigpoll); |
89 | CHECK_CSI_SIZE (_sigpoll, 2*sizeof(int)); | 142 | CHECK_CSI_SIZE (_sigpoll, 2*sizeof(int)); |
90 | CHECK_SI_SIZE (_sigpoll, 4*sizeof(int)); | 143 | CHECK_SI_SIZE (_sigpoll, 4*sizeof(int)); |
91 | 144 | ||
145 | BUILD_BUG_ON(offsetof(siginfo_t, si_band) != 0x10); | ||
146 | BUILD_BUG_ON(offsetof(siginfo_t, si_fd) != 0x18); | ||
147 | BUILD_BUG_ON(offsetof(compat_siginfo_t, si_band) != 0x0C); | ||
148 | BUILD_BUG_ON(offsetof(compat_siginfo_t, si_fd) != 0x10); | ||
149 | |||
92 | CHECK_CSI_OFFSET(_sigsys); | 150 | CHECK_CSI_OFFSET(_sigsys); |
93 | CHECK_CSI_SIZE (_sigsys, 3*sizeof(int)); | 151 | CHECK_CSI_SIZE (_sigsys, 3*sizeof(int)); |
94 | CHECK_SI_SIZE (_sigsys, 4*sizeof(int)); | 152 | CHECK_SI_SIZE (_sigsys, 4*sizeof(int)); |
95 | 153 | ||
154 | BUILD_BUG_ON(offsetof(siginfo_t, si_call_addr) != 0x10); | ||
155 | BUILD_BUG_ON(offsetof(siginfo_t, si_syscall) != 0x18); | ||
156 | BUILD_BUG_ON(offsetof(siginfo_t, si_arch) != 0x1C); | ||
157 | BUILD_BUG_ON(offsetof(compat_siginfo_t, si_call_addr) != 0x0C); | ||
158 | BUILD_BUG_ON(offsetof(compat_siginfo_t, si_syscall) != 0x10); | ||
159 | BUILD_BUG_ON(offsetof(compat_siginfo_t, si_arch) != 0x14); | ||
160 | |||
96 | /* any new si_fields should be added here */ | 161 | /* any new si_fields should be added here */ |
97 | } | 162 | } |
98 | 163 | ||
diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c index 1f9188f5357c..feb28fee6cea 100644 --- a/arch/x86/kernel/unwind_orc.c +++ b/arch/x86/kernel/unwind_orc.c | |||
@@ -5,7 +5,6 @@ | |||
5 | #include <asm/unwind.h> | 5 | #include <asm/unwind.h> |
6 | #include <asm/orc_types.h> | 6 | #include <asm/orc_types.h> |
7 | #include <asm/orc_lookup.h> | 7 | #include <asm/orc_lookup.h> |
8 | #include <asm/sections.h> | ||
9 | 8 | ||
10 | #define orc_warn(fmt, ...) \ | 9 | #define orc_warn(fmt, ...) \ |
11 | printk_deferred_once(KERN_WARNING pr_fmt("WARNING: " fmt), ##__VA_ARGS__) | 10 | printk_deferred_once(KERN_WARNING pr_fmt("WARNING: " fmt), ##__VA_ARGS__) |
@@ -148,7 +147,7 @@ static struct orc_entry *orc_find(unsigned long ip) | |||
148 | } | 147 | } |
149 | 148 | ||
150 | /* vmlinux .init slow lookup: */ | 149 | /* vmlinux .init slow lookup: */ |
151 | if (ip >= (unsigned long)_sinittext && ip < (unsigned long)_einittext) | 150 | if (init_kernel_text(ip)) |
152 | return __orc_find(__start_orc_unwind_ip, __start_orc_unwind, | 151 | return __orc_find(__start_orc_unwind_ip, __start_orc_unwind, |
153 | __stop_orc_unwind_ip - __start_orc_unwind_ip, ip); | 152 | __stop_orc_unwind_ip - __start_orc_unwind_ip, ip); |
154 | 153 | ||
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index 5edb27f1a2c4..9d0b5af7db91 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c | |||
@@ -727,7 +727,8 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code) | |||
727 | return; | 727 | return; |
728 | 728 | ||
729 | check_vip: | 729 | check_vip: |
730 | if (VEFLAGS & X86_EFLAGS_VIP) { | 730 | if ((VEFLAGS & (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) == |
731 | (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) { | ||
731 | save_v86_state(regs, VM86_STI); | 732 | save_v86_state(regs, VM86_STI); |
732 | return; | 733 | return; |
733 | } | 734 | } |
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 9b138a06c1a4..b854ebf5851b 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S | |||
@@ -118,9 +118,11 @@ SECTIONS | |||
118 | 118 | ||
119 | #ifdef CONFIG_X86_64 | 119 | #ifdef CONFIG_X86_64 |
120 | . = ALIGN(PAGE_SIZE); | 120 | . = ALIGN(PAGE_SIZE); |
121 | VMLINUX_SYMBOL(__entry_trampoline_start) = .; | ||
121 | _entry_trampoline = .; | 122 | _entry_trampoline = .; |
122 | *(.entry_trampoline) | 123 | *(.entry_trampoline) |
123 | . = ALIGN(PAGE_SIZE); | 124 | . = ALIGN(PAGE_SIZE); |
125 | VMLINUX_SYMBOL(__entry_trampoline_end) = .; | ||
124 | ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big"); | 126 | ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big"); |
125 | #endif | 127 | #endif |
126 | 128 | ||
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index a0c5a69bc7c4..b671fc2d0422 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c | |||
@@ -607,7 +607,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, | |||
607 | (1 << KVM_FEATURE_PV_EOI) | | 607 | (1 << KVM_FEATURE_PV_EOI) | |
608 | (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) | | 608 | (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) | |
609 | (1 << KVM_FEATURE_PV_UNHALT) | | 609 | (1 << KVM_FEATURE_PV_UNHALT) | |
610 | (1 << KVM_FEATURE_PV_TLB_FLUSH); | 610 | (1 << KVM_FEATURE_PV_TLB_FLUSH) | |
611 | (1 << KVM_FEATURE_ASYNC_PF_VMEXIT); | ||
611 | 612 | ||
612 | if (sched_info_on()) | 613 | if (sched_info_on()) |
613 | entry->eax |= (1 << KVM_FEATURE_STEAL_TIME); | 614 | entry->eax |= (1 << KVM_FEATURE_STEAL_TIME); |
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 924ac8ce9d50..391dda8d43b7 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
@@ -2002,14 +2002,13 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) | |||
2002 | 2002 | ||
2003 | void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event) | 2003 | void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event) |
2004 | { | 2004 | { |
2005 | struct kvm_lapic *apic; | 2005 | struct kvm_lapic *apic = vcpu->arch.apic; |
2006 | int i; | 2006 | int i; |
2007 | 2007 | ||
2008 | apic_debug("%s\n", __func__); | 2008 | if (!apic) |
2009 | return; | ||
2009 | 2010 | ||
2010 | ASSERT(vcpu); | 2011 | apic_debug("%s\n", __func__); |
2011 | apic = vcpu->arch.apic; | ||
2012 | ASSERT(apic != NULL); | ||
2013 | 2012 | ||
2014 | /* Stop the timer in case it's a reset to an active apic */ | 2013 | /* Stop the timer in case it's a reset to an active apic */ |
2015 | hrtimer_cancel(&apic->lapic_timer.timer); | 2014 | hrtimer_cancel(&apic->lapic_timer.timer); |
@@ -2165,7 +2164,6 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu) | |||
2165 | */ | 2164 | */ |
2166 | vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE; | 2165 | vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE; |
2167 | static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */ | 2166 | static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */ |
2168 | kvm_lapic_reset(vcpu, false); | ||
2169 | kvm_iodevice_init(&apic->dev, &apic_mmio_ops); | 2167 | kvm_iodevice_init(&apic->dev, &apic_mmio_ops); |
2170 | 2168 | ||
2171 | return 0; | 2169 | return 0; |
@@ -2569,7 +2567,6 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu) | |||
2569 | 2567 | ||
2570 | pe = xchg(&apic->pending_events, 0); | 2568 | pe = xchg(&apic->pending_events, 0); |
2571 | if (test_bit(KVM_APIC_INIT, &pe)) { | 2569 | if (test_bit(KVM_APIC_INIT, &pe)) { |
2572 | kvm_lapic_reset(vcpu, true); | ||
2573 | kvm_vcpu_reset(vcpu, true); | 2570 | kvm_vcpu_reset(vcpu, true); |
2574 | if (kvm_vcpu_is_bsp(apic->vcpu)) | 2571 | if (kvm_vcpu_is_bsp(apic->vcpu)) |
2575 | vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; | 2572 | vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; |
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 46ff304140c7..763bb3bade63 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -2770,8 +2770,10 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, | |||
2770 | else | 2770 | else |
2771 | pte_access &= ~ACC_WRITE_MASK; | 2771 | pte_access &= ~ACC_WRITE_MASK; |
2772 | 2772 | ||
2773 | if (!kvm_is_mmio_pfn(pfn)) | ||
2774 | spte |= shadow_me_mask; | ||
2775 | |||
2773 | spte |= (u64)pfn << PAGE_SHIFT; | 2776 | spte |= (u64)pfn << PAGE_SHIFT; |
2774 | spte |= shadow_me_mask; | ||
2775 | 2777 | ||
2776 | if (pte_access & ACC_WRITE_MASK) { | 2778 | if (pte_access & ACC_WRITE_MASK) { |
2777 | 2779 | ||
@@ -3029,7 +3031,7 @@ static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn) | |||
3029 | return RET_PF_RETRY; | 3031 | return RET_PF_RETRY; |
3030 | } | 3032 | } |
3031 | 3033 | ||
3032 | return -EFAULT; | 3034 | return RET_PF_EMULATE; |
3033 | } | 3035 | } |
3034 | 3036 | ||
3035 | static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, | 3037 | static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, |
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index b3e488a74828..be9c839e2c89 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -49,6 +49,7 @@ | |||
49 | #include <asm/debugreg.h> | 49 | #include <asm/debugreg.h> |
50 | #include <asm/kvm_para.h> | 50 | #include <asm/kvm_para.h> |
51 | #include <asm/irq_remapping.h> | 51 | #include <asm/irq_remapping.h> |
52 | #include <asm/microcode.h> | ||
52 | #include <asm/nospec-branch.h> | 53 | #include <asm/nospec-branch.h> |
53 | 54 | ||
54 | #include <asm/virtext.h> | 55 | #include <asm/virtext.h> |
@@ -178,6 +179,8 @@ struct vcpu_svm { | |||
178 | uint64_t sysenter_eip; | 179 | uint64_t sysenter_eip; |
179 | uint64_t tsc_aux; | 180 | uint64_t tsc_aux; |
180 | 181 | ||
182 | u64 msr_decfg; | ||
183 | |||
181 | u64 next_rip; | 184 | u64 next_rip; |
182 | 185 | ||
183 | u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS]; | 186 | u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS]; |
@@ -300,6 +303,8 @@ module_param(vgif, int, 0444); | |||
300 | static int sev = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT); | 303 | static int sev = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT); |
301 | module_param(sev, int, 0444); | 304 | module_param(sev, int, 0444); |
302 | 305 | ||
306 | static u8 rsm_ins_bytes[] = "\x0f\xaa"; | ||
307 | |||
303 | static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); | 308 | static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); |
304 | static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa); | 309 | static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa); |
305 | static void svm_complete_interrupts(struct vcpu_svm *svm); | 310 | static void svm_complete_interrupts(struct vcpu_svm *svm); |
@@ -1383,6 +1388,7 @@ static void init_vmcb(struct vcpu_svm *svm) | |||
1383 | set_intercept(svm, INTERCEPT_SKINIT); | 1388 | set_intercept(svm, INTERCEPT_SKINIT); |
1384 | set_intercept(svm, INTERCEPT_WBINVD); | 1389 | set_intercept(svm, INTERCEPT_WBINVD); |
1385 | set_intercept(svm, INTERCEPT_XSETBV); | 1390 | set_intercept(svm, INTERCEPT_XSETBV); |
1391 | set_intercept(svm, INTERCEPT_RSM); | ||
1386 | 1392 | ||
1387 | if (!kvm_mwait_in_guest()) { | 1393 | if (!kvm_mwait_in_guest()) { |
1388 | set_intercept(svm, INTERCEPT_MONITOR); | 1394 | set_intercept(svm, INTERCEPT_MONITOR); |
@@ -1902,6 +1908,7 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) | |||
1902 | u32 dummy; | 1908 | u32 dummy; |
1903 | u32 eax = 1; | 1909 | u32 eax = 1; |
1904 | 1910 | ||
1911 | vcpu->arch.microcode_version = 0x01000065; | ||
1905 | svm->spec_ctrl = 0; | 1912 | svm->spec_ctrl = 0; |
1906 | 1913 | ||
1907 | if (!init_event) { | 1914 | if (!init_event) { |
@@ -3699,6 +3706,12 @@ static int emulate_on_interception(struct vcpu_svm *svm) | |||
3699 | return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; | 3706 | return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; |
3700 | } | 3707 | } |
3701 | 3708 | ||
3709 | static int rsm_interception(struct vcpu_svm *svm) | ||
3710 | { | ||
3711 | return x86_emulate_instruction(&svm->vcpu, 0, 0, | ||
3712 | rsm_ins_bytes, 2) == EMULATE_DONE; | ||
3713 | } | ||
3714 | |||
3702 | static int rdpmc_interception(struct vcpu_svm *svm) | 3715 | static int rdpmc_interception(struct vcpu_svm *svm) |
3703 | { | 3716 | { |
3704 | int err; | 3717 | int err; |
@@ -3860,6 +3873,22 @@ static int cr8_write_interception(struct vcpu_svm *svm) | |||
3860 | return 0; | 3873 | return 0; |
3861 | } | 3874 | } |
3862 | 3875 | ||
3876 | static int svm_get_msr_feature(struct kvm_msr_entry *msr) | ||
3877 | { | ||
3878 | msr->data = 0; | ||
3879 | |||
3880 | switch (msr->index) { | ||
3881 | case MSR_F10H_DECFG: | ||
3882 | if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) | ||
3883 | msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE; | ||
3884 | break; | ||
3885 | default: | ||
3886 | return 1; | ||
3887 | } | ||
3888 | |||
3889 | return 0; | ||
3890 | } | ||
3891 | |||
3863 | static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | 3892 | static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) |
3864 | { | 3893 | { |
3865 | struct vcpu_svm *svm = to_svm(vcpu); | 3894 | struct vcpu_svm *svm = to_svm(vcpu); |
@@ -3935,9 +3964,6 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
3935 | 3964 | ||
3936 | msr_info->data = svm->spec_ctrl; | 3965 | msr_info->data = svm->spec_ctrl; |
3937 | break; | 3966 | break; |
3938 | case MSR_IA32_UCODE_REV: | ||
3939 | msr_info->data = 0x01000065; | ||
3940 | break; | ||
3941 | case MSR_F15H_IC_CFG: { | 3967 | case MSR_F15H_IC_CFG: { |
3942 | 3968 | ||
3943 | int family, model; | 3969 | int family, model; |
@@ -3955,6 +3981,9 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
3955 | msr_info->data = 0x1E; | 3981 | msr_info->data = 0x1E; |
3956 | } | 3982 | } |
3957 | break; | 3983 | break; |
3984 | case MSR_F10H_DECFG: | ||
3985 | msr_info->data = svm->msr_decfg; | ||
3986 | break; | ||
3958 | default: | 3987 | default: |
3959 | return kvm_get_msr_common(vcpu, msr_info); | 3988 | return kvm_get_msr_common(vcpu, msr_info); |
3960 | } | 3989 | } |
@@ -4133,6 +4162,24 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) | |||
4133 | case MSR_VM_IGNNE: | 4162 | case MSR_VM_IGNNE: |
4134 | vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); | 4163 | vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); |
4135 | break; | 4164 | break; |
4165 | case MSR_F10H_DECFG: { | ||
4166 | struct kvm_msr_entry msr_entry; | ||
4167 | |||
4168 | msr_entry.index = msr->index; | ||
4169 | if (svm_get_msr_feature(&msr_entry)) | ||
4170 | return 1; | ||
4171 | |||
4172 | /* Check the supported bits */ | ||
4173 | if (data & ~msr_entry.data) | ||
4174 | return 1; | ||
4175 | |||
4176 | /* Don't allow the guest to change a bit, #GP */ | ||
4177 | if (!msr->host_initiated && (data ^ msr_entry.data)) | ||
4178 | return 1; | ||
4179 | |||
4180 | svm->msr_decfg = data; | ||
4181 | break; | ||
4182 | } | ||
4136 | case MSR_IA32_APICBASE: | 4183 | case MSR_IA32_APICBASE: |
4137 | if (kvm_vcpu_apicv_active(vcpu)) | 4184 | if (kvm_vcpu_apicv_active(vcpu)) |
4138 | avic_update_vapic_bar(to_svm(vcpu), data); | 4185 | avic_update_vapic_bar(to_svm(vcpu), data); |
@@ -4541,7 +4588,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = { | |||
4541 | [SVM_EXIT_MWAIT] = mwait_interception, | 4588 | [SVM_EXIT_MWAIT] = mwait_interception, |
4542 | [SVM_EXIT_XSETBV] = xsetbv_interception, | 4589 | [SVM_EXIT_XSETBV] = xsetbv_interception, |
4543 | [SVM_EXIT_NPF] = npf_interception, | 4590 | [SVM_EXIT_NPF] = npf_interception, |
4544 | [SVM_EXIT_RSM] = emulate_on_interception, | 4591 | [SVM_EXIT_RSM] = rsm_interception, |
4545 | [SVM_EXIT_AVIC_INCOMPLETE_IPI] = avic_incomplete_ipi_interception, | 4592 | [SVM_EXIT_AVIC_INCOMPLETE_IPI] = avic_incomplete_ipi_interception, |
4546 | [SVM_EXIT_AVIC_UNACCELERATED_ACCESS] = avic_unaccelerated_access_interception, | 4593 | [SVM_EXIT_AVIC_UNACCELERATED_ACCESS] = avic_unaccelerated_access_interception, |
4547 | }; | 4594 | }; |
@@ -5355,7 +5402,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) | |||
5355 | * being speculatively taken. | 5402 | * being speculatively taken. |
5356 | */ | 5403 | */ |
5357 | if (svm->spec_ctrl) | 5404 | if (svm->spec_ctrl) |
5358 | wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl); | 5405 | native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl); |
5359 | 5406 | ||
5360 | asm volatile ( | 5407 | asm volatile ( |
5361 | "push %%" _ASM_BP "; \n\t" | 5408 | "push %%" _ASM_BP "; \n\t" |
@@ -5464,11 +5511,11 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) | |||
5464 | * If the L02 MSR bitmap does not intercept the MSR, then we need to | 5511 | * If the L02 MSR bitmap does not intercept the MSR, then we need to |
5465 | * save it. | 5512 | * save it. |
5466 | */ | 5513 | */ |
5467 | if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)) | 5514 | if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) |
5468 | rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl); | 5515 | svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); |
5469 | 5516 | ||
5470 | if (svm->spec_ctrl) | 5517 | if (svm->spec_ctrl) |
5471 | wrmsrl(MSR_IA32_SPEC_CTRL, 0); | 5518 | native_wrmsrl(MSR_IA32_SPEC_CTRL, 0); |
5472 | 5519 | ||
5473 | /* Eliminate branch target predictions from guest mode */ | 5520 | /* Eliminate branch target predictions from guest mode */ |
5474 | vmexit_fill_RSB(); | 5521 | vmexit_fill_RSB(); |
@@ -6236,16 +6283,18 @@ e_free: | |||
6236 | 6283 | ||
6237 | static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp) | 6284 | static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp) |
6238 | { | 6285 | { |
6286 | void __user *measure = (void __user *)(uintptr_t)argp->data; | ||
6239 | struct kvm_sev_info *sev = &kvm->arch.sev_info; | 6287 | struct kvm_sev_info *sev = &kvm->arch.sev_info; |
6240 | struct sev_data_launch_measure *data; | 6288 | struct sev_data_launch_measure *data; |
6241 | struct kvm_sev_launch_measure params; | 6289 | struct kvm_sev_launch_measure params; |
6290 | void __user *p = NULL; | ||
6242 | void *blob = NULL; | 6291 | void *blob = NULL; |
6243 | int ret; | 6292 | int ret; |
6244 | 6293 | ||
6245 | if (!sev_guest(kvm)) | 6294 | if (!sev_guest(kvm)) |
6246 | return -ENOTTY; | 6295 | return -ENOTTY; |
6247 | 6296 | ||
6248 | if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, sizeof(params))) | 6297 | if (copy_from_user(¶ms, measure, sizeof(params))) |
6249 | return -EFAULT; | 6298 | return -EFAULT; |
6250 | 6299 | ||
6251 | data = kzalloc(sizeof(*data), GFP_KERNEL); | 6300 | data = kzalloc(sizeof(*data), GFP_KERNEL); |
@@ -6256,17 +6305,13 @@ static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp) | |||
6256 | if (!params.len) | 6305 | if (!params.len) |
6257 | goto cmd; | 6306 | goto cmd; |
6258 | 6307 | ||
6259 | if (params.uaddr) { | 6308 | p = (void __user *)(uintptr_t)params.uaddr; |
6309 | if (p) { | ||
6260 | if (params.len > SEV_FW_BLOB_MAX_SIZE) { | 6310 | if (params.len > SEV_FW_BLOB_MAX_SIZE) { |
6261 | ret = -EINVAL; | 6311 | ret = -EINVAL; |
6262 | goto e_free; | 6312 | goto e_free; |
6263 | } | 6313 | } |
6264 | 6314 | ||
6265 | if (!access_ok(VERIFY_WRITE, params.uaddr, params.len)) { | ||
6266 | ret = -EFAULT; | ||
6267 | goto e_free; | ||
6268 | } | ||
6269 | |||
6270 | ret = -ENOMEM; | 6315 | ret = -ENOMEM; |
6271 | blob = kmalloc(params.len, GFP_KERNEL); | 6316 | blob = kmalloc(params.len, GFP_KERNEL); |
6272 | if (!blob) | 6317 | if (!blob) |
@@ -6290,13 +6335,13 @@ cmd: | |||
6290 | goto e_free_blob; | 6335 | goto e_free_blob; |
6291 | 6336 | ||
6292 | if (blob) { | 6337 | if (blob) { |
6293 | if (copy_to_user((void __user *)(uintptr_t)params.uaddr, blob, params.len)) | 6338 | if (copy_to_user(p, blob, params.len)) |
6294 | ret = -EFAULT; | 6339 | ret = -EFAULT; |
6295 | } | 6340 | } |
6296 | 6341 | ||
6297 | done: | 6342 | done: |
6298 | params.len = data->len; | 6343 | params.len = data->len; |
6299 | if (copy_to_user((void __user *)(uintptr_t)argp->data, ¶ms, sizeof(params))) | 6344 | if (copy_to_user(measure, ¶ms, sizeof(params))) |
6300 | ret = -EFAULT; | 6345 | ret = -EFAULT; |
6301 | e_free_blob: | 6346 | e_free_blob: |
6302 | kfree(blob); | 6347 | kfree(blob); |
@@ -6597,7 +6642,7 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp) | |||
6597 | struct page **pages; | 6642 | struct page **pages; |
6598 | void *blob, *hdr; | 6643 | void *blob, *hdr; |
6599 | unsigned long n; | 6644 | unsigned long n; |
6600 | int ret; | 6645 | int ret, offset; |
6601 | 6646 | ||
6602 | if (!sev_guest(kvm)) | 6647 | if (!sev_guest(kvm)) |
6603 | return -ENOTTY; | 6648 | return -ENOTTY; |
@@ -6623,6 +6668,10 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp) | |||
6623 | if (!data) | 6668 | if (!data) |
6624 | goto e_unpin_memory; | 6669 | goto e_unpin_memory; |
6625 | 6670 | ||
6671 | offset = params.guest_uaddr & (PAGE_SIZE - 1); | ||
6672 | data->guest_address = __sme_page_pa(pages[0]) + offset; | ||
6673 | data->guest_len = params.guest_len; | ||
6674 | |||
6626 | blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len); | 6675 | blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len); |
6627 | if (IS_ERR(blob)) { | 6676 | if (IS_ERR(blob)) { |
6628 | ret = PTR_ERR(blob); | 6677 | ret = PTR_ERR(blob); |
@@ -6637,8 +6686,8 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp) | |||
6637 | ret = PTR_ERR(hdr); | 6686 | ret = PTR_ERR(hdr); |
6638 | goto e_free_blob; | 6687 | goto e_free_blob; |
6639 | } | 6688 | } |
6640 | data->trans_address = __psp_pa(blob); | 6689 | data->hdr_address = __psp_pa(hdr); |
6641 | data->trans_len = params.trans_len; | 6690 | data->hdr_len = params.hdr_len; |
6642 | 6691 | ||
6643 | data->handle = sev->handle; | 6692 | data->handle = sev->handle; |
6644 | ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error); | 6693 | ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error); |
@@ -6821,6 +6870,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { | |||
6821 | .vcpu_unblocking = svm_vcpu_unblocking, | 6870 | .vcpu_unblocking = svm_vcpu_unblocking, |
6822 | 6871 | ||
6823 | .update_bp_intercept = update_bp_intercept, | 6872 | .update_bp_intercept = update_bp_intercept, |
6873 | .get_msr_feature = svm_get_msr_feature, | ||
6824 | .get_msr = svm_get_msr, | 6874 | .get_msr = svm_get_msr, |
6825 | .set_msr = svm_set_msr, | 6875 | .set_msr = svm_set_msr, |
6826 | .get_segment_base = svm_get_segment_base, | 6876 | .get_segment_base = svm_get_segment_base, |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 3dec126aa302..051dab74e4e9 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -51,6 +51,7 @@ | |||
51 | #include <asm/apic.h> | 51 | #include <asm/apic.h> |
52 | #include <asm/irq_remapping.h> | 52 | #include <asm/irq_remapping.h> |
53 | #include <asm/mmu_context.h> | 53 | #include <asm/mmu_context.h> |
54 | #include <asm/microcode.h> | ||
54 | #include <asm/nospec-branch.h> | 55 | #include <asm/nospec-branch.h> |
55 | 56 | ||
56 | #include "trace.h" | 57 | #include "trace.h" |
@@ -3226,6 +3227,11 @@ static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu, | |||
3226 | return !(val & ~valid_bits); | 3227 | return !(val & ~valid_bits); |
3227 | } | 3228 | } |
3228 | 3229 | ||
3230 | static int vmx_get_msr_feature(struct kvm_msr_entry *msr) | ||
3231 | { | ||
3232 | return 1; | ||
3233 | } | ||
3234 | |||
3229 | /* | 3235 | /* |
3230 | * Reads an msr value (of 'msr_index') into 'pdata'. | 3236 | * Reads an msr value (of 'msr_index') into 'pdata'. |
3231 | * Returns 0 on success, non-0 otherwise. | 3237 | * Returns 0 on success, non-0 otherwise. |
@@ -4485,7 +4491,8 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) | |||
4485 | vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL, | 4491 | vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL, |
4486 | SECONDARY_EXEC_DESC); | 4492 | SECONDARY_EXEC_DESC); |
4487 | hw_cr4 &= ~X86_CR4_UMIP; | 4493 | hw_cr4 &= ~X86_CR4_UMIP; |
4488 | } else | 4494 | } else if (!is_guest_mode(vcpu) || |
4495 | !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC)) | ||
4489 | vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, | 4496 | vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, |
4490 | SECONDARY_EXEC_DESC); | 4497 | SECONDARY_EXEC_DESC); |
4491 | 4498 | ||
@@ -5765,6 +5772,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) | |||
5765 | vmx->rmode.vm86_active = 0; | 5772 | vmx->rmode.vm86_active = 0; |
5766 | vmx->spec_ctrl = 0; | 5773 | vmx->spec_ctrl = 0; |
5767 | 5774 | ||
5775 | vcpu->arch.microcode_version = 0x100000000ULL; | ||
5768 | vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); | 5776 | vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); |
5769 | kvm_set_cr8(vcpu, 0); | 5777 | kvm_set_cr8(vcpu, 0); |
5770 | 5778 | ||
@@ -9452,7 +9460,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) | |||
9452 | * being speculatively taken. | 9460 | * being speculatively taken. |
9453 | */ | 9461 | */ |
9454 | if (vmx->spec_ctrl) | 9462 | if (vmx->spec_ctrl) |
9455 | wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl); | 9463 | native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl); |
9456 | 9464 | ||
9457 | vmx->__launched = vmx->loaded_vmcs->launched; | 9465 | vmx->__launched = vmx->loaded_vmcs->launched; |
9458 | asm( | 9466 | asm( |
@@ -9587,11 +9595,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) | |||
9587 | * If the L02 MSR bitmap does not intercept the MSR, then we need to | 9595 | * If the L02 MSR bitmap does not intercept the MSR, then we need to |
9588 | * save it. | 9596 | * save it. |
9589 | */ | 9597 | */ |
9590 | if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)) | 9598 | if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) |
9591 | rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl); | 9599 | vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); |
9592 | 9600 | ||
9593 | if (vmx->spec_ctrl) | 9601 | if (vmx->spec_ctrl) |
9594 | wrmsrl(MSR_IA32_SPEC_CTRL, 0); | 9602 | native_wrmsrl(MSR_IA32_SPEC_CTRL, 0); |
9595 | 9603 | ||
9596 | /* Eliminate branch target predictions from guest mode */ | 9604 | /* Eliminate branch target predictions from guest mode */ |
9597 | vmexit_fill_RSB(); | 9605 | vmexit_fill_RSB(); |
@@ -11199,7 +11207,12 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) | |||
11199 | if (ret) | 11207 | if (ret) |
11200 | return ret; | 11208 | return ret; |
11201 | 11209 | ||
11202 | if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) | 11210 | /* |
11211 | * If we're entering a halted L2 vcpu and the L2 vcpu won't be woken | ||
11212 | * by event injection, halt vcpu. | ||
11213 | */ | ||
11214 | if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) && | ||
11215 | !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK)) | ||
11203 | return kvm_vcpu_halt(vcpu); | 11216 | return kvm_vcpu_halt(vcpu); |
11204 | 11217 | ||
11205 | vmx->nested.nested_run_pending = 1; | 11218 | vmx->nested.nested_run_pending = 1; |
@@ -12290,6 +12303,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { | |||
12290 | .vcpu_put = vmx_vcpu_put, | 12303 | .vcpu_put = vmx_vcpu_put, |
12291 | 12304 | ||
12292 | .update_bp_intercept = update_exception_bitmap, | 12305 | .update_bp_intercept = update_exception_bitmap, |
12306 | .get_msr_feature = vmx_get_msr_feature, | ||
12293 | .get_msr = vmx_get_msr, | 12307 | .get_msr = vmx_get_msr, |
12294 | .set_msr = vmx_set_msr, | 12308 | .set_msr = vmx_set_msr, |
12295 | .get_segment_base = vmx_get_segment_base, | 12309 | .get_segment_base = vmx_get_segment_base, |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c8a0b545ac20..18b5ca7a3197 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -1049,6 +1049,45 @@ static u32 emulated_msrs[] = { | |||
1049 | 1049 | ||
1050 | static unsigned num_emulated_msrs; | 1050 | static unsigned num_emulated_msrs; |
1051 | 1051 | ||
1052 | /* | ||
1053 | * List of msr numbers which are used to expose MSR-based features that | ||
1054 | * can be used by a hypervisor to validate requested CPU features. | ||
1055 | */ | ||
1056 | static u32 msr_based_features[] = { | ||
1057 | MSR_F10H_DECFG, | ||
1058 | MSR_IA32_UCODE_REV, | ||
1059 | }; | ||
1060 | |||
1061 | static unsigned int num_msr_based_features; | ||
1062 | |||
1063 | static int kvm_get_msr_feature(struct kvm_msr_entry *msr) | ||
1064 | { | ||
1065 | switch (msr->index) { | ||
1066 | case MSR_IA32_UCODE_REV: | ||
1067 | rdmsrl(msr->index, msr->data); | ||
1068 | break; | ||
1069 | default: | ||
1070 | if (kvm_x86_ops->get_msr_feature(msr)) | ||
1071 | return 1; | ||
1072 | } | ||
1073 | return 0; | ||
1074 | } | ||
1075 | |||
1076 | static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data) | ||
1077 | { | ||
1078 | struct kvm_msr_entry msr; | ||
1079 | int r; | ||
1080 | |||
1081 | msr.index = index; | ||
1082 | r = kvm_get_msr_feature(&msr); | ||
1083 | if (r) | ||
1084 | return r; | ||
1085 | |||
1086 | *data = msr.data; | ||
1087 | |||
1088 | return 0; | ||
1089 | } | ||
1090 | |||
1052 | bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) | 1091 | bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) |
1053 | { | 1092 | { |
1054 | if (efer & efer_reserved_bits) | 1093 | if (efer & efer_reserved_bits) |
@@ -2222,7 +2261,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
2222 | 2261 | ||
2223 | switch (msr) { | 2262 | switch (msr) { |
2224 | case MSR_AMD64_NB_CFG: | 2263 | case MSR_AMD64_NB_CFG: |
2225 | case MSR_IA32_UCODE_REV: | ||
2226 | case MSR_IA32_UCODE_WRITE: | 2264 | case MSR_IA32_UCODE_WRITE: |
2227 | case MSR_VM_HSAVE_PA: | 2265 | case MSR_VM_HSAVE_PA: |
2228 | case MSR_AMD64_PATCH_LOADER: | 2266 | case MSR_AMD64_PATCH_LOADER: |
@@ -2230,6 +2268,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
2230 | case MSR_AMD64_DC_CFG: | 2268 | case MSR_AMD64_DC_CFG: |
2231 | break; | 2269 | break; |
2232 | 2270 | ||
2271 | case MSR_IA32_UCODE_REV: | ||
2272 | if (msr_info->host_initiated) | ||
2273 | vcpu->arch.microcode_version = data; | ||
2274 | break; | ||
2233 | case MSR_EFER: | 2275 | case MSR_EFER: |
2234 | return set_efer(vcpu, data); | 2276 | return set_efer(vcpu, data); |
2235 | case MSR_K7_HWCR: | 2277 | case MSR_K7_HWCR: |
@@ -2525,7 +2567,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
2525 | msr_info->data = 0; | 2567 | msr_info->data = 0; |
2526 | break; | 2568 | break; |
2527 | case MSR_IA32_UCODE_REV: | 2569 | case MSR_IA32_UCODE_REV: |
2528 | msr_info->data = 0x100000000ULL; | 2570 | msr_info->data = vcpu->arch.microcode_version; |
2529 | break; | 2571 | break; |
2530 | case MSR_MTRRcap: | 2572 | case MSR_MTRRcap: |
2531 | case 0x200 ... 0x2ff: | 2573 | case 0x200 ... 0x2ff: |
@@ -2680,13 +2722,11 @@ static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs, | |||
2680 | int (*do_msr)(struct kvm_vcpu *vcpu, | 2722 | int (*do_msr)(struct kvm_vcpu *vcpu, |
2681 | unsigned index, u64 *data)) | 2723 | unsigned index, u64 *data)) |
2682 | { | 2724 | { |
2683 | int i, idx; | 2725 | int i; |
2684 | 2726 | ||
2685 | idx = srcu_read_lock(&vcpu->kvm->srcu); | ||
2686 | for (i = 0; i < msrs->nmsrs; ++i) | 2727 | for (i = 0; i < msrs->nmsrs; ++i) |
2687 | if (do_msr(vcpu, entries[i].index, &entries[i].data)) | 2728 | if (do_msr(vcpu, entries[i].index, &entries[i].data)) |
2688 | break; | 2729 | break; |
2689 | srcu_read_unlock(&vcpu->kvm->srcu, idx); | ||
2690 | 2730 | ||
2691 | return i; | 2731 | return i; |
2692 | } | 2732 | } |
@@ -2785,6 +2825,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) | |||
2785 | case KVM_CAP_SET_BOOT_CPU_ID: | 2825 | case KVM_CAP_SET_BOOT_CPU_ID: |
2786 | case KVM_CAP_SPLIT_IRQCHIP: | 2826 | case KVM_CAP_SPLIT_IRQCHIP: |
2787 | case KVM_CAP_IMMEDIATE_EXIT: | 2827 | case KVM_CAP_IMMEDIATE_EXIT: |
2828 | case KVM_CAP_GET_MSR_FEATURES: | ||
2788 | r = 1; | 2829 | r = 1; |
2789 | break; | 2830 | break; |
2790 | case KVM_CAP_ADJUST_CLOCK: | 2831 | case KVM_CAP_ADJUST_CLOCK: |
@@ -2899,6 +2940,31 @@ long kvm_arch_dev_ioctl(struct file *filp, | |||
2899 | goto out; | 2940 | goto out; |
2900 | r = 0; | 2941 | r = 0; |
2901 | break; | 2942 | break; |
2943 | case KVM_GET_MSR_FEATURE_INDEX_LIST: { | ||
2944 | struct kvm_msr_list __user *user_msr_list = argp; | ||
2945 | struct kvm_msr_list msr_list; | ||
2946 | unsigned int n; | ||
2947 | |||
2948 | r = -EFAULT; | ||
2949 | if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list))) | ||
2950 | goto out; | ||
2951 | n = msr_list.nmsrs; | ||
2952 | msr_list.nmsrs = num_msr_based_features; | ||
2953 | if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list))) | ||
2954 | goto out; | ||
2955 | r = -E2BIG; | ||
2956 | if (n < msr_list.nmsrs) | ||
2957 | goto out; | ||
2958 | r = -EFAULT; | ||
2959 | if (copy_to_user(user_msr_list->indices, &msr_based_features, | ||
2960 | num_msr_based_features * sizeof(u32))) | ||
2961 | goto out; | ||
2962 | r = 0; | ||
2963 | break; | ||
2964 | } | ||
2965 | case KVM_GET_MSRS: | ||
2966 | r = msr_io(NULL, argp, do_get_msr_feature, 1); | ||
2967 | break; | ||
2902 | } | 2968 | } |
2903 | default: | 2969 | default: |
2904 | r = -EINVAL; | 2970 | r = -EINVAL; |
@@ -3636,12 +3702,18 @@ long kvm_arch_vcpu_ioctl(struct file *filp, | |||
3636 | r = 0; | 3702 | r = 0; |
3637 | break; | 3703 | break; |
3638 | } | 3704 | } |
3639 | case KVM_GET_MSRS: | 3705 | case KVM_GET_MSRS: { |
3706 | int idx = srcu_read_lock(&vcpu->kvm->srcu); | ||
3640 | r = msr_io(vcpu, argp, do_get_msr, 1); | 3707 | r = msr_io(vcpu, argp, do_get_msr, 1); |
3708 | srcu_read_unlock(&vcpu->kvm->srcu, idx); | ||
3641 | break; | 3709 | break; |
3642 | case KVM_SET_MSRS: | 3710 | } |
3711 | case KVM_SET_MSRS: { | ||
3712 | int idx = srcu_read_lock(&vcpu->kvm->srcu); | ||
3643 | r = msr_io(vcpu, argp, do_set_msr, 0); | 3713 | r = msr_io(vcpu, argp, do_set_msr, 0); |
3714 | srcu_read_unlock(&vcpu->kvm->srcu, idx); | ||
3644 | break; | 3715 | break; |
3716 | } | ||
3645 | case KVM_TPR_ACCESS_REPORTING: { | 3717 | case KVM_TPR_ACCESS_REPORTING: { |
3646 | struct kvm_tpr_access_ctl tac; | 3718 | struct kvm_tpr_access_ctl tac; |
3647 | 3719 | ||
@@ -4464,6 +4536,19 @@ static void kvm_init_msr_list(void) | |||
4464 | j++; | 4536 | j++; |
4465 | } | 4537 | } |
4466 | num_emulated_msrs = j; | 4538 | num_emulated_msrs = j; |
4539 | |||
4540 | for (i = j = 0; i < ARRAY_SIZE(msr_based_features); i++) { | ||
4541 | struct kvm_msr_entry msr; | ||
4542 | |||
4543 | msr.index = msr_based_features[i]; | ||
4544 | if (kvm_get_msr_feature(&msr)) | ||
4545 | continue; | ||
4546 | |||
4547 | if (j < i) | ||
4548 | msr_based_features[j] = msr_based_features[i]; | ||
4549 | j++; | ||
4550 | } | ||
4551 | num_msr_based_features = j; | ||
4467 | } | 4552 | } |
4468 | 4553 | ||
4469 | static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, | 4554 | static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, |
@@ -8017,6 +8102,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) | |||
8017 | 8102 | ||
8018 | void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) | 8103 | void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) |
8019 | { | 8104 | { |
8105 | kvm_lapic_reset(vcpu, init_event); | ||
8106 | |||
8020 | vcpu->arch.hflags = 0; | 8107 | vcpu->arch.hflags = 0; |
8021 | 8108 | ||
8022 | vcpu->arch.smi_pending = 0; | 8109 | vcpu->arch.smi_pending = 0; |
@@ -8460,10 +8547,8 @@ int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size) | |||
8460 | return r; | 8547 | return r; |
8461 | } | 8548 | } |
8462 | 8549 | ||
8463 | if (!size) { | 8550 | if (!size) |
8464 | r = vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE); | 8551 | vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE); |
8465 | WARN_ON(r < 0); | ||
8466 | } | ||
8467 | 8552 | ||
8468 | return 0; | 8553 | return 0; |
8469 | } | 8554 | } |
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index 91e9700cc6dc..25a972c61b0a 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile | |||
@@ -28,7 +28,6 @@ lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o | |||
28 | lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o | 28 | lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o |
29 | lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o | 29 | lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o |
30 | lib-$(CONFIG_RETPOLINE) += retpoline.o | 30 | lib-$(CONFIG_RETPOLINE) += retpoline.o |
31 | OBJECT_FILES_NON_STANDARD_retpoline.o :=y | ||
32 | 31 | ||
33 | obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o | 32 | obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o |
34 | 33 | ||
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index 480edc3a5e03..c909961e678a 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S | |||
@@ -7,7 +7,6 @@ | |||
7 | #include <asm/alternative-asm.h> | 7 | #include <asm/alternative-asm.h> |
8 | #include <asm/export.h> | 8 | #include <asm/export.h> |
9 | #include <asm/nospec-branch.h> | 9 | #include <asm/nospec-branch.h> |
10 | #include <asm/bitsperlong.h> | ||
11 | 10 | ||
12 | .macro THUNK reg | 11 | .macro THUNK reg |
13 | .section .text.__x86.indirect_thunk | 12 | .section .text.__x86.indirect_thunk |
@@ -47,58 +46,3 @@ GENERATE_THUNK(r13) | |||
47 | GENERATE_THUNK(r14) | 46 | GENERATE_THUNK(r14) |
48 | GENERATE_THUNK(r15) | 47 | GENERATE_THUNK(r15) |
49 | #endif | 48 | #endif |
50 | |||
51 | /* | ||
52 | * Fill the CPU return stack buffer. | ||
53 | * | ||
54 | * Each entry in the RSB, if used for a speculative 'ret', contains an | ||
55 | * infinite 'pause; lfence; jmp' loop to capture speculative execution. | ||
56 | * | ||
57 | * This is required in various cases for retpoline and IBRS-based | ||
58 | * mitigations for the Spectre variant 2 vulnerability. Sometimes to | ||
59 | * eliminate potentially bogus entries from the RSB, and sometimes | ||
60 | * purely to ensure that it doesn't get empty, which on some CPUs would | ||
61 | * allow predictions from other (unwanted!) sources to be used. | ||
62 | * | ||
63 | * Google experimented with loop-unrolling and this turned out to be | ||
64 | * the optimal version - two calls, each with their own speculation | ||
65 | * trap should their return address end up getting used, in a loop. | ||
66 | */ | ||
67 | .macro STUFF_RSB nr:req sp:req | ||
68 | mov $(\nr / 2), %_ASM_BX | ||
69 | .align 16 | ||
70 | 771: | ||
71 | call 772f | ||
72 | 773: /* speculation trap */ | ||
73 | pause | ||
74 | lfence | ||
75 | jmp 773b | ||
76 | .align 16 | ||
77 | 772: | ||
78 | call 774f | ||
79 | 775: /* speculation trap */ | ||
80 | pause | ||
81 | lfence | ||
82 | jmp 775b | ||
83 | .align 16 | ||
84 | 774: | ||
85 | dec %_ASM_BX | ||
86 | jnz 771b | ||
87 | add $((BITS_PER_LONG/8) * \nr), \sp | ||
88 | .endm | ||
89 | |||
90 | #define RSB_FILL_LOOPS 16 /* To avoid underflow */ | ||
91 | |||
92 | ENTRY(__fill_rsb) | ||
93 | STUFF_RSB RSB_FILL_LOOPS, %_ASM_SP | ||
94 | ret | ||
95 | END(__fill_rsb) | ||
96 | EXPORT_SYMBOL_GPL(__fill_rsb) | ||
97 | |||
98 | #define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */ | ||
99 | |||
100 | ENTRY(__clear_rsb) | ||
101 | STUFF_RSB RSB_CLEAR_LOOPS, %_ASM_SP | ||
102 | ret | ||
103 | END(__clear_rsb) | ||
104 | EXPORT_SYMBOL_GPL(__clear_rsb) | ||
diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c index b9283cc27622..476d810639a8 100644 --- a/arch/x86/mm/cpu_entry_area.c +++ b/arch/x86/mm/cpu_entry_area.c | |||
@@ -163,4 +163,10 @@ void __init setup_cpu_entry_areas(void) | |||
163 | 163 | ||
164 | for_each_possible_cpu(cpu) | 164 | for_each_possible_cpu(cpu) |
165 | setup_cpu_entry_area(cpu); | 165 | setup_cpu_entry_area(cpu); |
166 | |||
167 | /* | ||
168 | * This is the last essential update to swapper_pgdir which needs | ||
169 | * to be synchronized to initial_page_table on 32bit. | ||
170 | */ | ||
171 | sync_initial_page_table(); | ||
166 | } | 172 | } |
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 800de815519c..25a30b5d6582 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -330,7 +330,7 @@ static noinline int vmalloc_fault(unsigned long address) | |||
330 | if (!pmd_k) | 330 | if (!pmd_k) |
331 | return -1; | 331 | return -1; |
332 | 332 | ||
333 | if (pmd_huge(*pmd_k)) | 333 | if (pmd_large(*pmd_k)) |
334 | return 0; | 334 | return 0; |
335 | 335 | ||
336 | pte_k = pte_offset_kernel(pmd_k, address); | 336 | pte_k = pte_offset_kernel(pmd_k, address); |
@@ -475,7 +475,7 @@ static noinline int vmalloc_fault(unsigned long address) | |||
475 | if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref)) | 475 | if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref)) |
476 | BUG(); | 476 | BUG(); |
477 | 477 | ||
478 | if (pud_huge(*pud)) | 478 | if (pud_large(*pud)) |
479 | return 0; | 479 | return 0; |
480 | 480 | ||
481 | pmd = pmd_offset(pud, address); | 481 | pmd = pmd_offset(pud, address); |
@@ -486,7 +486,7 @@ static noinline int vmalloc_fault(unsigned long address) | |||
486 | if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref)) | 486 | if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref)) |
487 | BUG(); | 487 | BUG(); |
488 | 488 | ||
489 | if (pmd_huge(*pmd)) | 489 | if (pmd_large(*pmd)) |
490 | return 0; | 490 | return 0; |
491 | 491 | ||
492 | pte_ref = pte_offset_kernel(pmd_ref, address); | 492 | pte_ref = pte_offset_kernel(pmd_ref, address); |
@@ -1248,10 +1248,6 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, | |||
1248 | tsk = current; | 1248 | tsk = current; |
1249 | mm = tsk->mm; | 1249 | mm = tsk->mm; |
1250 | 1250 | ||
1251 | /* | ||
1252 | * Detect and handle instructions that would cause a page fault for | ||
1253 | * both a tracked kernel page and a userspace page. | ||
1254 | */ | ||
1255 | prefetchw(&mm->mmap_sem); | 1251 | prefetchw(&mm->mmap_sem); |
1256 | 1252 | ||
1257 | if (unlikely(kmmio_fault(regs, address))) | 1253 | if (unlikely(kmmio_fault(regs, address))) |
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 79cb066f40c0..396e1f0151ac 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c | |||
@@ -453,6 +453,21 @@ static inline void permanent_kmaps_init(pgd_t *pgd_base) | |||
453 | } | 453 | } |
454 | #endif /* CONFIG_HIGHMEM */ | 454 | #endif /* CONFIG_HIGHMEM */ |
455 | 455 | ||
456 | void __init sync_initial_page_table(void) | ||
457 | { | ||
458 | clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY, | ||
459 | swapper_pg_dir + KERNEL_PGD_BOUNDARY, | ||
460 | KERNEL_PGD_PTRS); | ||
461 | |||
462 | /* | ||
463 | * sync back low identity map too. It is used for example | ||
464 | * in the 32-bit EFI stub. | ||
465 | */ | ||
466 | clone_pgd_range(initial_page_table, | ||
467 | swapper_pg_dir + KERNEL_PGD_BOUNDARY, | ||
468 | min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY)); | ||
469 | } | ||
470 | |||
456 | void __init native_pagetable_init(void) | 471 | void __init native_pagetable_init(void) |
457 | { | 472 | { |
458 | unsigned long pfn, va; | 473 | unsigned long pfn, va; |
diff --git a/arch/x86/mm/mem_encrypt_boot.S b/arch/x86/mm/mem_encrypt_boot.S index 01f682cf77a8..40a6085063d6 100644 --- a/arch/x86/mm/mem_encrypt_boot.S +++ b/arch/x86/mm/mem_encrypt_boot.S | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <asm/page.h> | 15 | #include <asm/page.h> |
16 | #include <asm/processor-flags.h> | 16 | #include <asm/processor-flags.h> |
17 | #include <asm/msr-index.h> | 17 | #include <asm/msr-index.h> |
18 | #include <asm/nospec-branch.h> | ||
18 | 19 | ||
19 | .text | 20 | .text |
20 | .code64 | 21 | .code64 |
@@ -59,6 +60,7 @@ ENTRY(sme_encrypt_execute) | |||
59 | movq %rax, %r8 /* Workarea encryption routine */ | 60 | movq %rax, %r8 /* Workarea encryption routine */ |
60 | addq $PAGE_SIZE, %r8 /* Workarea intermediate copy buffer */ | 61 | addq $PAGE_SIZE, %r8 /* Workarea intermediate copy buffer */ |
61 | 62 | ||
63 | ANNOTATE_RETPOLINE_SAFE | ||
62 | call *%rax /* Call the encryption routine */ | 64 | call *%rax /* Call the encryption routine */ |
63 | 65 | ||
64 | pop %r12 | 66 | pop %r12 |
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c index ce38f165489b..631507f0c198 100644 --- a/arch/x86/mm/pti.c +++ b/arch/x86/mm/pti.c | |||
@@ -332,7 +332,7 @@ static void __init pti_clone_user_shared(void) | |||
332 | } | 332 | } |
333 | 333 | ||
334 | /* | 334 | /* |
335 | * Clone the ESPFIX P4D into the user space visinble page table | 335 | * Clone the ESPFIX P4D into the user space visible page table |
336 | */ | 336 | */ |
337 | static void __init pti_setup_espfix64(void) | 337 | static void __init pti_setup_espfix64(void) |
338 | { | 338 | { |
diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c index 2c67bae6bb53..fb1df9488e98 100644 --- a/arch/x86/platform/intel-mid/intel-mid.c +++ b/arch/x86/platform/intel-mid/intel-mid.c | |||
@@ -79,7 +79,7 @@ static void intel_mid_power_off(void) | |||
79 | 79 | ||
80 | static void intel_mid_reboot(void) | 80 | static void intel_mid_reboot(void) |
81 | { | 81 | { |
82 | intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0); | 82 | intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0); |
83 | } | 83 | } |
84 | 84 | ||
85 | static unsigned long __init intel_mid_calibrate_tsc(void) | 85 | static unsigned long __init intel_mid_calibrate_tsc(void) |
diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S index de53bd15df5a..24bb7598774e 100644 --- a/arch/x86/realmode/rm/trampoline_64.S +++ b/arch/x86/realmode/rm/trampoline_64.S | |||
@@ -102,7 +102,7 @@ ENTRY(startup_32) | |||
102 | * don't we'll eventually crash trying to execute encrypted | 102 | * don't we'll eventually crash trying to execute encrypted |
103 | * instructions. | 103 | * instructions. |
104 | */ | 104 | */ |
105 | bt $TH_FLAGS_SME_ACTIVE_BIT, pa_tr_flags | 105 | btl $TH_FLAGS_SME_ACTIVE_BIT, pa_tr_flags |
106 | jnc .Ldone | 106 | jnc .Ldone |
107 | movl $MSR_K8_SYSCFG, %ecx | 107 | movl $MSR_K8_SYSCFG, %ecx |
108 | rdmsr | 108 | rdmsr |
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index c047f42552e1..3c2c2530737e 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c | |||
@@ -1376,8 +1376,6 @@ asmlinkage __visible void __init xen_start_kernel(void) | |||
1376 | 1376 | ||
1377 | if (!xen_initial_domain()) { | 1377 | if (!xen_initial_domain()) { |
1378 | add_preferred_console("xenboot", 0, NULL); | 1378 | add_preferred_console("xenboot", 0, NULL); |
1379 | add_preferred_console("tty", 0, NULL); | ||
1380 | add_preferred_console("hvc", 0, NULL); | ||
1381 | if (pci_xen) | 1379 | if (pci_xen) |
1382 | x86_init.pci.arch_init = pci_xen_init; | 1380 | x86_init.pci.arch_init = pci_xen_init; |
1383 | } else { | 1381 | } else { |
@@ -1410,6 +1408,10 @@ asmlinkage __visible void __init xen_start_kernel(void) | |||
1410 | 1408 | ||
1411 | xen_boot_params_init_edd(); | 1409 | xen_boot_params_init_edd(); |
1412 | } | 1410 | } |
1411 | |||
1412 | add_preferred_console("tty", 0, NULL); | ||
1413 | add_preferred_console("hvc", 0, NULL); | ||
1414 | |||
1413 | #ifdef CONFIG_PCI | 1415 | #ifdef CONFIG_PCI |
1414 | /* PCI BIOS service won't work from a PV guest. */ | 1416 | /* PCI BIOS service won't work from a PV guest. */ |
1415 | pci_probe &= ~PCI_PROBE_BIOS; | 1417 | pci_probe &= ~PCI_PROBE_BIOS; |
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c index d9f96cc5d743..1d83152c761b 100644 --- a/arch/x86/xen/suspend.c +++ b/arch/x86/xen/suspend.c | |||
@@ -1,12 +1,15 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0 |
2 | #include <linux/types.h> | 2 | #include <linux/types.h> |
3 | #include <linux/tick.h> | 3 | #include <linux/tick.h> |
4 | #include <linux/percpu-defs.h> | ||
4 | 5 | ||
5 | #include <xen/xen.h> | 6 | #include <xen/xen.h> |
6 | #include <xen/interface/xen.h> | 7 | #include <xen/interface/xen.h> |
7 | #include <xen/grant_table.h> | 8 | #include <xen/grant_table.h> |
8 | #include <xen/events.h> | 9 | #include <xen/events.h> |
9 | 10 | ||
11 | #include <asm/cpufeatures.h> | ||
12 | #include <asm/msr-index.h> | ||
10 | #include <asm/xen/hypercall.h> | 13 | #include <asm/xen/hypercall.h> |
11 | #include <asm/xen/page.h> | 14 | #include <asm/xen/page.h> |
12 | #include <asm/fixmap.h> | 15 | #include <asm/fixmap.h> |
@@ -15,6 +18,8 @@ | |||
15 | #include "mmu.h" | 18 | #include "mmu.h" |
16 | #include "pmu.h" | 19 | #include "pmu.h" |
17 | 20 | ||
21 | static DEFINE_PER_CPU(u64, spec_ctrl); | ||
22 | |||
18 | void xen_arch_pre_suspend(void) | 23 | void xen_arch_pre_suspend(void) |
19 | { | 24 | { |
20 | xen_save_time_memory_area(); | 25 | xen_save_time_memory_area(); |
@@ -35,6 +40,9 @@ void xen_arch_post_suspend(int cancelled) | |||
35 | 40 | ||
36 | static void xen_vcpu_notify_restore(void *data) | 41 | static void xen_vcpu_notify_restore(void *data) |
37 | { | 42 | { |
43 | if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL)) | ||
44 | wrmsrl(MSR_IA32_SPEC_CTRL, this_cpu_read(spec_ctrl)); | ||
45 | |||
38 | /* Boot processor notified via generic timekeeping_resume() */ | 46 | /* Boot processor notified via generic timekeeping_resume() */ |
39 | if (smp_processor_id() == 0) | 47 | if (smp_processor_id() == 0) |
40 | return; | 48 | return; |
@@ -44,7 +52,15 @@ static void xen_vcpu_notify_restore(void *data) | |||
44 | 52 | ||
45 | static void xen_vcpu_notify_suspend(void *data) | 53 | static void xen_vcpu_notify_suspend(void *data) |
46 | { | 54 | { |
55 | u64 tmp; | ||
56 | |||
47 | tick_suspend_local(); | 57 | tick_suspend_local(); |
58 | |||
59 | if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL)) { | ||
60 | rdmsrl(MSR_IA32_SPEC_CTRL, tmp); | ||
61 | this_cpu_write(spec_ctrl, tmp); | ||
62 | wrmsrl(MSR_IA32_SPEC_CTRL, 0); | ||
63 | } | ||
48 | } | 64 | } |
49 | 65 | ||
50 | void xen_arch_resume(void) | 66 | void xen_arch_resume(void) |
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 4117524ca45b..c2033a232a44 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c | |||
@@ -812,7 +812,6 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, | |||
812 | struct gendisk *disk; | 812 | struct gendisk *disk; |
813 | struct request_queue *q; | 813 | struct request_queue *q; |
814 | struct blkcg_gq *blkg; | 814 | struct blkcg_gq *blkg; |
815 | struct module *owner; | ||
816 | unsigned int major, minor; | 815 | unsigned int major, minor; |
817 | int key_len, part, ret; | 816 | int key_len, part, ret; |
818 | char *body; | 817 | char *body; |
@@ -904,9 +903,7 @@ fail_unlock: | |||
904 | spin_unlock_irq(q->queue_lock); | 903 | spin_unlock_irq(q->queue_lock); |
905 | rcu_read_unlock(); | 904 | rcu_read_unlock(); |
906 | fail: | 905 | fail: |
907 | owner = disk->fops->owner; | 906 | put_disk_and_module(disk); |
908 | put_disk(disk); | ||
909 | module_put(owner); | ||
910 | /* | 907 | /* |
911 | * If queue was bypassing, we should retry. Do so after a | 908 | * If queue was bypassing, we should retry. Do so after a |
912 | * short msleep(). It isn't strictly necessary but queue | 909 | * short msleep(). It isn't strictly necessary but queue |
@@ -931,13 +928,9 @@ EXPORT_SYMBOL_GPL(blkg_conf_prep); | |||
931 | void blkg_conf_finish(struct blkg_conf_ctx *ctx) | 928 | void blkg_conf_finish(struct blkg_conf_ctx *ctx) |
932 | __releases(ctx->disk->queue->queue_lock) __releases(rcu) | 929 | __releases(ctx->disk->queue->queue_lock) __releases(rcu) |
933 | { | 930 | { |
934 | struct module *owner; | ||
935 | |||
936 | spin_unlock_irq(ctx->disk->queue->queue_lock); | 931 | spin_unlock_irq(ctx->disk->queue->queue_lock); |
937 | rcu_read_unlock(); | 932 | rcu_read_unlock(); |
938 | owner = ctx->disk->fops->owner; | 933 | put_disk_and_module(ctx->disk); |
939 | put_disk(ctx->disk); | ||
940 | module_put(owner); | ||
941 | } | 934 | } |
942 | EXPORT_SYMBOL_GPL(blkg_conf_finish); | 935 | EXPORT_SYMBOL_GPL(blkg_conf_finish); |
943 | 936 | ||
diff --git a/block/blk-core.c b/block/blk-core.c index 2d1a7bbe0634..6d82c4f7fadd 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
@@ -2434,7 +2434,7 @@ blk_qc_t submit_bio(struct bio *bio) | |||
2434 | unsigned int count; | 2434 | unsigned int count; |
2435 | 2435 | ||
2436 | if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) | 2436 | if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) |
2437 | count = queue_logical_block_size(bio->bi_disk->queue); | 2437 | count = queue_logical_block_size(bio->bi_disk->queue) >> 9; |
2438 | else | 2438 | else |
2439 | count = bio_sectors(bio); | 2439 | count = bio_sectors(bio); |
2440 | 2440 | ||
diff --git a/block/blk-mq.c b/block/blk-mq.c index 357492712b0e..16e83e6df404 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
@@ -712,7 +712,6 @@ static void __blk_mq_requeue_request(struct request *rq) | |||
712 | 712 | ||
713 | trace_block_rq_requeue(q, rq); | 713 | trace_block_rq_requeue(q, rq); |
714 | wbt_requeue(q->rq_wb, &rq->issue_stat); | 714 | wbt_requeue(q->rq_wb, &rq->issue_stat); |
715 | blk_mq_sched_requeue_request(rq); | ||
716 | 715 | ||
717 | if (blk_mq_rq_state(rq) != MQ_RQ_IDLE) { | 716 | if (blk_mq_rq_state(rq) != MQ_RQ_IDLE) { |
718 | blk_mq_rq_update_state(rq, MQ_RQ_IDLE); | 717 | blk_mq_rq_update_state(rq, MQ_RQ_IDLE); |
@@ -725,6 +724,9 @@ void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list) | |||
725 | { | 724 | { |
726 | __blk_mq_requeue_request(rq); | 725 | __blk_mq_requeue_request(rq); |
727 | 726 | ||
727 | /* this request will be re-inserted to io scheduler queue */ | ||
728 | blk_mq_sched_requeue_request(rq); | ||
729 | |||
728 | BUG_ON(blk_queued_rq(rq)); | 730 | BUG_ON(blk_queued_rq(rq)); |
729 | blk_mq_add_to_requeue_list(rq, true, kick_requeue_list); | 731 | blk_mq_add_to_requeue_list(rq, true, kick_requeue_list); |
730 | } | 732 | } |
diff --git a/block/genhd.c b/block/genhd.c index 88a53c188cb7..9656f9e9f99e 100644 --- a/block/genhd.c +++ b/block/genhd.c | |||
@@ -547,7 +547,7 @@ static int exact_lock(dev_t devt, void *data) | |||
547 | { | 547 | { |
548 | struct gendisk *p = data; | 548 | struct gendisk *p = data; |
549 | 549 | ||
550 | if (!get_disk(p)) | 550 | if (!get_disk_and_module(p)) |
551 | return -1; | 551 | return -1; |
552 | return 0; | 552 | return 0; |
553 | } | 553 | } |
@@ -717,6 +717,11 @@ void del_gendisk(struct gendisk *disk) | |||
717 | blk_integrity_del(disk); | 717 | blk_integrity_del(disk); |
718 | disk_del_events(disk); | 718 | disk_del_events(disk); |
719 | 719 | ||
720 | /* | ||
721 | * Block lookups of the disk until all bdevs are unhashed and the | ||
722 | * disk is marked as dead (GENHD_FL_UP cleared). | ||
723 | */ | ||
724 | down_write(&disk->lookup_sem); | ||
720 | /* invalidate stuff */ | 725 | /* invalidate stuff */ |
721 | disk_part_iter_init(&piter, disk, | 726 | disk_part_iter_init(&piter, disk, |
722 | DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE); | 727 | DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE); |
@@ -731,6 +736,7 @@ void del_gendisk(struct gendisk *disk) | |||
731 | bdev_unhash_inode(disk_devt(disk)); | 736 | bdev_unhash_inode(disk_devt(disk)); |
732 | set_capacity(disk, 0); | 737 | set_capacity(disk, 0); |
733 | disk->flags &= ~GENHD_FL_UP; | 738 | disk->flags &= ~GENHD_FL_UP; |
739 | up_write(&disk->lookup_sem); | ||
734 | 740 | ||
735 | if (!(disk->flags & GENHD_FL_HIDDEN)) | 741 | if (!(disk->flags & GENHD_FL_HIDDEN)) |
736 | sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi"); | 742 | sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi"); |
@@ -809,16 +815,28 @@ struct gendisk *get_gendisk(dev_t devt, int *partno) | |||
809 | 815 | ||
810 | spin_lock_bh(&ext_devt_lock); | 816 | spin_lock_bh(&ext_devt_lock); |
811 | part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt))); | 817 | part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt))); |
812 | if (part && get_disk(part_to_disk(part))) { | 818 | if (part && get_disk_and_module(part_to_disk(part))) { |
813 | *partno = part->partno; | 819 | *partno = part->partno; |
814 | disk = part_to_disk(part); | 820 | disk = part_to_disk(part); |
815 | } | 821 | } |
816 | spin_unlock_bh(&ext_devt_lock); | 822 | spin_unlock_bh(&ext_devt_lock); |
817 | } | 823 | } |
818 | 824 | ||
819 | if (disk && unlikely(disk->flags & GENHD_FL_HIDDEN)) { | 825 | if (!disk) |
820 | put_disk(disk); | 826 | return NULL; |
827 | |||
828 | /* | ||
829 | * Synchronize with del_gendisk() to not return disk that is being | ||
830 | * destroyed. | ||
831 | */ | ||
832 | down_read(&disk->lookup_sem); | ||
833 | if (unlikely((disk->flags & GENHD_FL_HIDDEN) || | ||
834 | !(disk->flags & GENHD_FL_UP))) { | ||
835 | up_read(&disk->lookup_sem); | ||
836 | put_disk_and_module(disk); | ||
821 | disk = NULL; | 837 | disk = NULL; |
838 | } else { | ||
839 | up_read(&disk->lookup_sem); | ||
822 | } | 840 | } |
823 | return disk; | 841 | return disk; |
824 | } | 842 | } |
@@ -1418,6 +1436,7 @@ struct gendisk *__alloc_disk_node(int minors, int node_id) | |||
1418 | kfree(disk); | 1436 | kfree(disk); |
1419 | return NULL; | 1437 | return NULL; |
1420 | } | 1438 | } |
1439 | init_rwsem(&disk->lookup_sem); | ||
1421 | disk->node_id = node_id; | 1440 | disk->node_id = node_id; |
1422 | if (disk_expand_part_tbl(disk, 0)) { | 1441 | if (disk_expand_part_tbl(disk, 0)) { |
1423 | free_part_stats(&disk->part0); | 1442 | free_part_stats(&disk->part0); |
@@ -1453,7 +1472,7 @@ struct gendisk *__alloc_disk_node(int minors, int node_id) | |||
1453 | } | 1472 | } |
1454 | EXPORT_SYMBOL(__alloc_disk_node); | 1473 | EXPORT_SYMBOL(__alloc_disk_node); |
1455 | 1474 | ||
1456 | struct kobject *get_disk(struct gendisk *disk) | 1475 | struct kobject *get_disk_and_module(struct gendisk *disk) |
1457 | { | 1476 | { |
1458 | struct module *owner; | 1477 | struct module *owner; |
1459 | struct kobject *kobj; | 1478 | struct kobject *kobj; |
@@ -1471,17 +1490,30 @@ struct kobject *get_disk(struct gendisk *disk) | |||
1471 | return kobj; | 1490 | return kobj; |
1472 | 1491 | ||
1473 | } | 1492 | } |
1474 | 1493 | EXPORT_SYMBOL(get_disk_and_module); | |
1475 | EXPORT_SYMBOL(get_disk); | ||
1476 | 1494 | ||
1477 | void put_disk(struct gendisk *disk) | 1495 | void put_disk(struct gendisk *disk) |
1478 | { | 1496 | { |
1479 | if (disk) | 1497 | if (disk) |
1480 | kobject_put(&disk_to_dev(disk)->kobj); | 1498 | kobject_put(&disk_to_dev(disk)->kobj); |
1481 | } | 1499 | } |
1482 | |||
1483 | EXPORT_SYMBOL(put_disk); | 1500 | EXPORT_SYMBOL(put_disk); |
1484 | 1501 | ||
1502 | /* | ||
1503 | * This is a counterpart of get_disk_and_module() and thus also of | ||
1504 | * get_gendisk(). | ||
1505 | */ | ||
1506 | void put_disk_and_module(struct gendisk *disk) | ||
1507 | { | ||
1508 | if (disk) { | ||
1509 | struct module *owner = disk->fops->owner; | ||
1510 | |||
1511 | put_disk(disk); | ||
1512 | module_put(owner); | ||
1513 | } | ||
1514 | } | ||
1515 | EXPORT_SYMBOL(put_disk_and_module); | ||
1516 | |||
1485 | static void set_disk_ro_uevent(struct gendisk *gd, int ro) | 1517 | static void set_disk_ro_uevent(struct gendisk *gd, int ro) |
1486 | { | 1518 | { |
1487 | char event[] = "DISK_RO=1"; | 1519 | char event[] = "DISK_RO=1"; |
diff --git a/block/ioctl.c b/block/ioctl.c index 1668506d8ed8..3884d810efd2 100644 --- a/block/ioctl.c +++ b/block/ioctl.c | |||
@@ -225,7 +225,7 @@ static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode, | |||
225 | 225 | ||
226 | if (start + len > i_size_read(bdev->bd_inode)) | 226 | if (start + len > i_size_read(bdev->bd_inode)) |
227 | return -EINVAL; | 227 | return -EINVAL; |
228 | truncate_inode_pages_range(mapping, start, start + len); | 228 | truncate_inode_pages_range(mapping, start, start + len - 1); |
229 | return blkdev_issue_discard(bdev, start >> 9, len >> 9, | 229 | return blkdev_issue_discard(bdev, start >> 9, len >> 9, |
230 | GFP_KERNEL, flags); | 230 | GFP_KERNEL, flags); |
231 | } | 231 | } |
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c index f95c60774ce8..0d6d25e32e1f 100644 --- a/block/kyber-iosched.c +++ b/block/kyber-iosched.c | |||
@@ -833,6 +833,7 @@ static struct elevator_type kyber_sched = { | |||
833 | .limit_depth = kyber_limit_depth, | 833 | .limit_depth = kyber_limit_depth, |
834 | .prepare_request = kyber_prepare_request, | 834 | .prepare_request = kyber_prepare_request, |
835 | .finish_request = kyber_finish_request, | 835 | .finish_request = kyber_finish_request, |
836 | .requeue_request = kyber_finish_request, | ||
836 | .completed_request = kyber_completed_request, | 837 | .completed_request = kyber_completed_request, |
837 | .dispatch_request = kyber_dispatch_request, | 838 | .dispatch_request = kyber_dispatch_request, |
838 | .has_work = kyber_has_work, | 839 | .has_work = kyber_has_work, |
diff --git a/block/mq-deadline.c b/block/mq-deadline.c index c56f211c8440..8ec0ba9f5386 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c | |||
@@ -536,12 +536,21 @@ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx, | |||
536 | } | 536 | } |
537 | 537 | ||
538 | /* | 538 | /* |
539 | * Nothing to do here. This is defined only to ensure that .finish_request | ||
540 | * method is called upon request completion. | ||
541 | */ | ||
542 | static void dd_prepare_request(struct request *rq, struct bio *bio) | ||
543 | { | ||
544 | } | ||
545 | |||
546 | /* | ||
539 | * For zoned block devices, write unlock the target zone of | 547 | * For zoned block devices, write unlock the target zone of |
540 | * completed write requests. Do this while holding the zone lock | 548 | * completed write requests. Do this while holding the zone lock |
541 | * spinlock so that the zone is never unlocked while deadline_fifo_request() | 549 | * spinlock so that the zone is never unlocked while deadline_fifo_request() |
542 | * while deadline_next_request() are executing. | 550 | * or deadline_next_request() are executing. This function is called for |
551 | * all requests, whether or not these requests complete successfully. | ||
543 | */ | 552 | */ |
544 | static void dd_completed_request(struct request *rq) | 553 | static void dd_finish_request(struct request *rq) |
545 | { | 554 | { |
546 | struct request_queue *q = rq->q; | 555 | struct request_queue *q = rq->q; |
547 | 556 | ||
@@ -756,7 +765,8 @@ static struct elevator_type mq_deadline = { | |||
756 | .ops.mq = { | 765 | .ops.mq = { |
757 | .insert_requests = dd_insert_requests, | 766 | .insert_requests = dd_insert_requests, |
758 | .dispatch_request = dd_dispatch_request, | 767 | .dispatch_request = dd_dispatch_request, |
759 | .completed_request = dd_completed_request, | 768 | .prepare_request = dd_prepare_request, |
769 | .finish_request = dd_finish_request, | ||
760 | .next_request = elv_rb_latter_request, | 770 | .next_request = elv_rb_latter_request, |
761 | .former_request = elv_rb_former_request, | 771 | .former_request = elv_rb_former_request, |
762 | .bio_merge = dd_bio_merge, | 772 | .bio_merge = dd_bio_merge, |
diff --git a/block/partition-generic.c b/block/partition-generic.c index 91622db9aedf..08dabcd8b6ae 100644 --- a/block/partition-generic.c +++ b/block/partition-generic.c | |||
@@ -51,6 +51,12 @@ const char *bdevname(struct block_device *bdev, char *buf) | |||
51 | 51 | ||
52 | EXPORT_SYMBOL(bdevname); | 52 | EXPORT_SYMBOL(bdevname); |
53 | 53 | ||
54 | const char *bio_devname(struct bio *bio, char *buf) | ||
55 | { | ||
56 | return disk_name(bio->bi_disk, bio->bi_partno, buf); | ||
57 | } | ||
58 | EXPORT_SYMBOL(bio_devname); | ||
59 | |||
54 | /* | 60 | /* |
55 | * There's very little reason to use this, you should really | 61 | * There's very little reason to use this, you should really |
56 | * have a struct block_device just about everywhere and use | 62 | * have a struct block_device just about everywhere and use |
diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c index 9180b9bd5821..834509506ef6 100644 --- a/drivers/auxdisplay/img-ascii-lcd.c +++ b/drivers/auxdisplay/img-ascii-lcd.c | |||
@@ -97,7 +97,7 @@ static struct img_ascii_lcd_config boston_config = { | |||
97 | static void malta_update(struct img_ascii_lcd_ctx *ctx) | 97 | static void malta_update(struct img_ascii_lcd_ctx *ctx) |
98 | { | 98 | { |
99 | unsigned int i; | 99 | unsigned int i; |
100 | int err; | 100 | int err = 0; |
101 | 101 | ||
102 | for (i = 0; i < ctx->cfg->num_chars; i++) { | 102 | for (i = 0; i < ctx->cfg->num_chars; i++) { |
103 | err = regmap_write(ctx->regmap, | 103 | err = regmap_write(ctx->regmap, |
@@ -180,7 +180,7 @@ static int sead3_wait_lcd_idle(struct img_ascii_lcd_ctx *ctx) | |||
180 | static void sead3_update(struct img_ascii_lcd_ctx *ctx) | 180 | static void sead3_update(struct img_ascii_lcd_ctx *ctx) |
181 | { | 181 | { |
182 | unsigned int i; | 182 | unsigned int i; |
183 | int err; | 183 | int err = 0; |
184 | 184 | ||
185 | for (i = 0; i < ctx->cfg->num_chars; i++) { | 185 | for (i = 0; i < ctx->cfg->num_chars; i++) { |
186 | err = sead3_wait_lcd_idle(ctx); | 186 | err = sead3_wait_lcd_idle(ctx); |
@@ -224,7 +224,7 @@ MODULE_DEVICE_TABLE(of, img_ascii_lcd_matches); | |||
224 | 224 | ||
225 | /** | 225 | /** |
226 | * img_ascii_lcd_scroll() - scroll the display by a character | 226 | * img_ascii_lcd_scroll() - scroll the display by a character |
227 | * @arg: really a pointer to the private data structure | 227 | * @t: really a pointer to the private data structure |
228 | * | 228 | * |
229 | * Scroll the current message along the LCD by one character, rearming the | 229 | * Scroll the current message along the LCD by one character, rearming the |
230 | * timer if required. | 230 | * timer if required. |
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c index ea7869c0d7f9..ec5e8800f8ad 100644 --- a/drivers/auxdisplay/panel.c +++ b/drivers/auxdisplay/panel.c | |||
@@ -1372,7 +1372,7 @@ static void panel_process_inputs(void) | |||
1372 | break; | 1372 | break; |
1373 | input->rise_timer = 0; | 1373 | input->rise_timer = 0; |
1374 | input->state = INPUT_ST_RISING; | 1374 | input->state = INPUT_ST_RISING; |
1375 | /* no break here, fall through */ | 1375 | /* fall through */ |
1376 | case INPUT_ST_RISING: | 1376 | case INPUT_ST_RISING: |
1377 | if ((phys_curr & input->mask) != input->value) { | 1377 | if ((phys_curr & input->mask) != input->value) { |
1378 | input->state = INPUT_ST_LOW; | 1378 | input->state = INPUT_ST_LOW; |
@@ -1385,11 +1385,11 @@ static void panel_process_inputs(void) | |||
1385 | } | 1385 | } |
1386 | input->high_timer = 0; | 1386 | input->high_timer = 0; |
1387 | input->state = INPUT_ST_HIGH; | 1387 | input->state = INPUT_ST_HIGH; |
1388 | /* no break here, fall through */ | 1388 | /* fall through */ |
1389 | case INPUT_ST_HIGH: | 1389 | case INPUT_ST_HIGH: |
1390 | if (input_state_high(input)) | 1390 | if (input_state_high(input)) |
1391 | break; | 1391 | break; |
1392 | /* no break here, fall through */ | 1392 | /* fall through */ |
1393 | case INPUT_ST_FALLING: | 1393 | case INPUT_ST_FALLING: |
1394 | input_state_falling(input); | 1394 | input_state_falling(input); |
1395 | } | 1395 | } |
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index e5aa62fcf5a8..3aaf6af3ec23 100644 --- a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c | |||
@@ -1758,7 +1758,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data) | |||
1758 | if (unit[drive].type->code == FD_NODRIVE) | 1758 | if (unit[drive].type->code == FD_NODRIVE) |
1759 | return NULL; | 1759 | return NULL; |
1760 | *part = 0; | 1760 | *part = 0; |
1761 | return get_disk(unit[drive].gendisk); | 1761 | return get_disk_and_module(unit[drive].gendisk); |
1762 | } | 1762 | } |
1763 | 1763 | ||
1764 | static int __init amiga_floppy_probe(struct platform_device *pdev) | 1764 | static int __init amiga_floppy_probe(struct platform_device *pdev) |
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index 8bc3b9fd8dd2..dfb2c2622e5a 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c | |||
@@ -1917,7 +1917,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data) | |||
1917 | if (drive >= FD_MAX_UNITS || type > NUM_DISK_MINORS) | 1917 | if (drive >= FD_MAX_UNITS || type > NUM_DISK_MINORS) |
1918 | return NULL; | 1918 | return NULL; |
1919 | *part = 0; | 1919 | *part = 0; |
1920 | return get_disk(unit[drive].disk); | 1920 | return get_disk_and_module(unit[drive].disk); |
1921 | } | 1921 | } |
1922 | 1922 | ||
1923 | static int __init atari_floppy_init (void) | 1923 | static int __init atari_floppy_init (void) |
diff --git a/drivers/block/brd.c b/drivers/block/brd.c index 8028a3a7e7fd..deea78e485da 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c | |||
@@ -456,7 +456,7 @@ static struct kobject *brd_probe(dev_t dev, int *part, void *data) | |||
456 | 456 | ||
457 | mutex_lock(&brd_devices_mutex); | 457 | mutex_lock(&brd_devices_mutex); |
458 | brd = brd_init_one(MINOR(dev) / max_part, &new); | 458 | brd = brd_init_one(MINOR(dev) / max_part, &new); |
459 | kobj = brd ? get_disk(brd->brd_disk) : NULL; | 459 | kobj = brd ? get_disk_and_module(brd->brd_disk) : NULL; |
460 | mutex_unlock(&brd_devices_mutex); | 460 | mutex_unlock(&brd_devices_mutex); |
461 | 461 | ||
462 | if (new) | 462 | if (new) |
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index eae484acfbbc..8ec7235fc93b 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c | |||
@@ -4505,7 +4505,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data) | |||
4505 | if (((*part >> 2) & 0x1f) >= ARRAY_SIZE(floppy_type)) | 4505 | if (((*part >> 2) & 0x1f) >= ARRAY_SIZE(floppy_type)) |
4506 | return NULL; | 4506 | return NULL; |
4507 | *part = 0; | 4507 | *part = 0; |
4508 | return get_disk(disks[drive]); | 4508 | return get_disk_and_module(disks[drive]); |
4509 | } | 4509 | } |
4510 | 4510 | ||
4511 | static int __init do_floppy_init(void) | 4511 | static int __init do_floppy_init(void) |
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index d5fe720cf149..ee62d2d517bf 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
@@ -266,7 +266,7 @@ static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos) | |||
266 | struct iov_iter i; | 266 | struct iov_iter i; |
267 | ssize_t bw; | 267 | ssize_t bw; |
268 | 268 | ||
269 | iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len); | 269 | iov_iter_bvec(&i, ITER_BVEC | WRITE, bvec, 1, bvec->bv_len); |
270 | 270 | ||
271 | file_start_write(file); | 271 | file_start_write(file); |
272 | bw = vfs_iter_write(file, &i, ppos, 0); | 272 | bw = vfs_iter_write(file, &i, ppos, 0); |
@@ -1922,7 +1922,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data) | |||
1922 | if (err < 0) | 1922 | if (err < 0) |
1923 | kobj = NULL; | 1923 | kobj = NULL; |
1924 | else | 1924 | else |
1925 | kobj = get_disk(lo->lo_disk); | 1925 | kobj = get_disk_and_module(lo->lo_disk); |
1926 | mutex_unlock(&loop_index_mutex); | 1926 | mutex_unlock(&loop_index_mutex); |
1927 | 1927 | ||
1928 | *part = 0; | 1928 | *part = 0; |
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 5f2a4240a204..86258b00a1d4 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c | |||
@@ -1591,7 +1591,7 @@ again: | |||
1591 | if (new_index < 0) { | 1591 | if (new_index < 0) { |
1592 | mutex_unlock(&nbd_index_mutex); | 1592 | mutex_unlock(&nbd_index_mutex); |
1593 | printk(KERN_ERR "nbd: failed to add new device\n"); | 1593 | printk(KERN_ERR "nbd: failed to add new device\n"); |
1594 | return ret; | 1594 | return new_index; |
1595 | } | 1595 | } |
1596 | nbd = idr_find(&nbd_index_idr, new_index); | 1596 | nbd = idr_find(&nbd_index_idr, new_index); |
1597 | } | 1597 | } |
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 531a0915066b..c61d20c9f3f8 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c | |||
@@ -1122,7 +1122,7 @@ static int pkt_start_recovery(struct packet_data *pkt) | |||
1122 | pkt->sector = new_sector; | 1122 | pkt->sector = new_sector; |
1123 | 1123 | ||
1124 | bio_reset(pkt->bio); | 1124 | bio_reset(pkt->bio); |
1125 | bio_set_set(pkt->bio, pd->bdev); | 1125 | bio_set_dev(pkt->bio, pd->bdev); |
1126 | bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0); | 1126 | bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0); |
1127 | pkt->bio->bi_iter.bi_sector = new_sector; | 1127 | pkt->bio->bi_iter.bi_sector = new_sector; |
1128 | pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE; | 1128 | pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE; |
diff --git a/drivers/block/swim.c b/drivers/block/swim.c index 84434d3ea19b..64e066eba72e 100644 --- a/drivers/block/swim.c +++ b/drivers/block/swim.c | |||
@@ -799,7 +799,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data) | |||
799 | return NULL; | 799 | return NULL; |
800 | 800 | ||
801 | *part = 0; | 801 | *part = 0; |
802 | return get_disk(swd->unit[drive].disk); | 802 | return get_disk_and_module(swd->unit[drive].disk); |
803 | } | 803 | } |
804 | 804 | ||
805 | static int swim_add_floppy(struct swim_priv *swd, enum drive_location location) | 805 | static int swim_add_floppy(struct swim_priv *swd, enum drive_location location) |
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index e126e4cac2ca..92ec1bbece51 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c | |||
@@ -262,6 +262,7 @@ static DEFINE_SPINLOCK(minor_lock); | |||
262 | 262 | ||
263 | static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo); | 263 | static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo); |
264 | static void blkfront_gather_backend_features(struct blkfront_info *info); | 264 | static void blkfront_gather_backend_features(struct blkfront_info *info); |
265 | static int negotiate_mq(struct blkfront_info *info); | ||
265 | 266 | ||
266 | static int get_id_from_freelist(struct blkfront_ring_info *rinfo) | 267 | static int get_id_from_freelist(struct blkfront_ring_info *rinfo) |
267 | { | 268 | { |
@@ -1774,11 +1775,18 @@ static int talk_to_blkback(struct xenbus_device *dev, | |||
1774 | unsigned int i, max_page_order; | 1775 | unsigned int i, max_page_order; |
1775 | unsigned int ring_page_order; | 1776 | unsigned int ring_page_order; |
1776 | 1777 | ||
1778 | if (!info) | ||
1779 | return -ENODEV; | ||
1780 | |||
1777 | max_page_order = xenbus_read_unsigned(info->xbdev->otherend, | 1781 | max_page_order = xenbus_read_unsigned(info->xbdev->otherend, |
1778 | "max-ring-page-order", 0); | 1782 | "max-ring-page-order", 0); |
1779 | ring_page_order = min(xen_blkif_max_ring_order, max_page_order); | 1783 | ring_page_order = min(xen_blkif_max_ring_order, max_page_order); |
1780 | info->nr_ring_pages = 1 << ring_page_order; | 1784 | info->nr_ring_pages = 1 << ring_page_order; |
1781 | 1785 | ||
1786 | err = negotiate_mq(info); | ||
1787 | if (err) | ||
1788 | goto destroy_blkring; | ||
1789 | |||
1782 | for (i = 0; i < info->nr_rings; i++) { | 1790 | for (i = 0; i < info->nr_rings; i++) { |
1783 | struct blkfront_ring_info *rinfo = &info->rinfo[i]; | 1791 | struct blkfront_ring_info *rinfo = &info->rinfo[i]; |
1784 | 1792 | ||
@@ -1978,11 +1986,6 @@ static int blkfront_probe(struct xenbus_device *dev, | |||
1978 | } | 1986 | } |
1979 | 1987 | ||
1980 | info->xbdev = dev; | 1988 | info->xbdev = dev; |
1981 | err = negotiate_mq(info); | ||
1982 | if (err) { | ||
1983 | kfree(info); | ||
1984 | return err; | ||
1985 | } | ||
1986 | 1989 | ||
1987 | mutex_init(&info->mutex); | 1990 | mutex_init(&info->mutex); |
1988 | info->vdevice = vdevice; | 1991 | info->vdevice = vdevice; |
@@ -2099,10 +2102,6 @@ static int blkfront_resume(struct xenbus_device *dev) | |||
2099 | 2102 | ||
2100 | blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); | 2103 | blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); |
2101 | 2104 | ||
2102 | err = negotiate_mq(info); | ||
2103 | if (err) | ||
2104 | return err; | ||
2105 | |||
2106 | err = talk_to_blkback(dev, info); | 2105 | err = talk_to_blkback(dev, info); |
2107 | if (!err) | 2106 | if (!err) |
2108 | blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings); | 2107 | blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings); |
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c index 41c95c9b2ab4..8f9130ab5887 100644 --- a/drivers/block/z2ram.c +++ b/drivers/block/z2ram.c | |||
@@ -332,7 +332,7 @@ static const struct block_device_operations z2_fops = | |||
332 | static struct kobject *z2_find(dev_t dev, int *part, void *data) | 332 | static struct kobject *z2_find(dev_t dev, int *part, void *data) |
333 | { | 333 | { |
334 | *part = 0; | 334 | *part = 0; |
335 | return get_disk(z2ram_gendisk); | 335 | return get_disk_and_module(z2ram_gendisk); |
336 | } | 336 | } |
337 | 337 | ||
338 | static struct request_queue *z2_queue; | 338 | static struct request_queue *z2_queue; |
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 2a55380ad730..60bf04b8f103 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c | |||
@@ -21,6 +21,7 @@ | |||
21 | * | 21 | * |
22 | */ | 22 | */ |
23 | 23 | ||
24 | #include <linux/dmi.h> | ||
24 | #include <linux/module.h> | 25 | #include <linux/module.h> |
25 | #include <linux/usb.h> | 26 | #include <linux/usb.h> |
26 | #include <linux/usb/quirks.h> | 27 | #include <linux/usb/quirks.h> |
@@ -379,6 +380,21 @@ static const struct usb_device_id blacklist_table[] = { | |||
379 | { } /* Terminating entry */ | 380 | { } /* Terminating entry */ |
380 | }; | 381 | }; |
381 | 382 | ||
383 | /* The Bluetooth USB module build into some devices needs to be reset on resume, | ||
384 | * this is a problem with the platform (likely shutting off all power) not with | ||
385 | * the module itself. So we use a DMI list to match known broken platforms. | ||
386 | */ | ||
387 | static const struct dmi_system_id btusb_needs_reset_resume_table[] = { | ||
388 | { | ||
389 | /* Lenovo Yoga 920 (QCA Rome device 0cf3:e300) */ | ||
390 | .matches = { | ||
391 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
392 | DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 920"), | ||
393 | }, | ||
394 | }, | ||
395 | {} | ||
396 | }; | ||
397 | |||
382 | #define BTUSB_MAX_ISOC_FRAMES 10 | 398 | #define BTUSB_MAX_ISOC_FRAMES 10 |
383 | 399 | ||
384 | #define BTUSB_INTR_RUNNING 0 | 400 | #define BTUSB_INTR_RUNNING 0 |
@@ -2945,6 +2961,9 @@ static int btusb_probe(struct usb_interface *intf, | |||
2945 | hdev->send = btusb_send_frame; | 2961 | hdev->send = btusb_send_frame; |
2946 | hdev->notify = btusb_notify; | 2962 | hdev->notify = btusb_notify; |
2947 | 2963 | ||
2964 | if (dmi_check_system(btusb_needs_reset_resume_table)) | ||
2965 | interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME; | ||
2966 | |||
2948 | #ifdef CONFIG_PM | 2967 | #ifdef CONFIG_PM |
2949 | err = btusb_config_oob_wake(hdev); | 2968 | err = btusb_config_oob_wake(hdev); |
2950 | if (err) | 2969 | if (err) |
@@ -3031,12 +3050,6 @@ static int btusb_probe(struct usb_interface *intf, | |||
3031 | if (id->driver_info & BTUSB_QCA_ROME) { | 3050 | if (id->driver_info & BTUSB_QCA_ROME) { |
3032 | data->setup_on_usb = btusb_setup_qca; | 3051 | data->setup_on_usb = btusb_setup_qca; |
3033 | hdev->set_bdaddr = btusb_set_bdaddr_ath3012; | 3052 | hdev->set_bdaddr = btusb_set_bdaddr_ath3012; |
3034 | |||
3035 | /* QCA Rome devices lose their updated firmware over suspend, | ||
3036 | * but the USB hub doesn't notice any status change. | ||
3037 | * explicitly request a device reset on resume. | ||
3038 | */ | ||
3039 | interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME; | ||
3040 | } | 3053 | } |
3041 | 3054 | ||
3042 | #ifdef CONFIG_BT_HCIBTUSB_RTL | 3055 | #ifdef CONFIG_BT_HCIBTUSB_RTL |
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index 0438a64b8185..6314dfb02969 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c | |||
@@ -922,12 +922,13 @@ static int bcm_get_resources(struct bcm_device *dev) | |||
922 | 922 | ||
923 | dev->clk = devm_clk_get(dev->dev, NULL); | 923 | dev->clk = devm_clk_get(dev->dev, NULL); |
924 | 924 | ||
925 | dev->device_wakeup = devm_gpiod_get(dev->dev, "device-wakeup", | 925 | dev->device_wakeup = devm_gpiod_get_optional(dev->dev, "device-wakeup", |
926 | GPIOD_OUT_LOW); | 926 | GPIOD_OUT_LOW); |
927 | if (IS_ERR(dev->device_wakeup)) | 927 | if (IS_ERR(dev->device_wakeup)) |
928 | return PTR_ERR(dev->device_wakeup); | 928 | return PTR_ERR(dev->device_wakeup); |
929 | 929 | ||
930 | dev->shutdown = devm_gpiod_get(dev->dev, "shutdown", GPIOD_OUT_LOW); | 930 | dev->shutdown = devm_gpiod_get_optional(dev->dev, "shutdown", |
931 | GPIOD_OUT_LOW); | ||
931 | if (IS_ERR(dev->shutdown)) | 932 | if (IS_ERR(dev->shutdown)) |
932 | return PTR_ERR(dev->shutdown); | 933 | return PTR_ERR(dev->shutdown); |
933 | 934 | ||
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 4d46003c46cf..cdaeeea7999c 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c | |||
@@ -630,7 +630,7 @@ static int sysc_init_dts_quirks(struct sysc *ddata) | |||
630 | for (i = 0; i < ARRAY_SIZE(sysc_dts_quirks); i++) { | 630 | for (i = 0; i < ARRAY_SIZE(sysc_dts_quirks); i++) { |
631 | prop = of_get_property(np, sysc_dts_quirks[i].name, &len); | 631 | prop = of_get_property(np, sysc_dts_quirks[i].name, &len); |
632 | if (!prop) | 632 | if (!prop) |
633 | break; | 633 | continue; |
634 | 634 | ||
635 | ddata->cfg.quirks |= sysc_dts_quirks[i].mask; | 635 | ddata->cfg.quirks |= sysc_dts_quirks[i].mask; |
636 | } | 636 | } |
diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c index 4d1dc8b46877..f95b9c75175b 100644 --- a/drivers/char/tpm/st33zp24/st33zp24.c +++ b/drivers/char/tpm/st33zp24/st33zp24.c | |||
@@ -457,7 +457,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf, | |||
457 | size_t count) | 457 | size_t count) |
458 | { | 458 | { |
459 | int size = 0; | 459 | int size = 0; |
460 | int expected; | 460 | u32 expected; |
461 | 461 | ||
462 | if (!chip) | 462 | if (!chip) |
463 | return -EBUSY; | 463 | return -EBUSY; |
@@ -474,7 +474,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf, | |||
474 | } | 474 | } |
475 | 475 | ||
476 | expected = be32_to_cpu(*(__be32 *)(buf + 2)); | 476 | expected = be32_to_cpu(*(__be32 *)(buf + 2)); |
477 | if (expected > count) { | 477 | if (expected > count || expected < TPM_HEADER_SIZE) { |
478 | size = -EIO; | 478 | size = -EIO; |
479 | goto out; | 479 | goto out; |
480 | } | 480 | } |
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index 76df4fbcf089..9e80a953d693 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c | |||
@@ -1190,6 +1190,10 @@ int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max) | |||
1190 | break; | 1190 | break; |
1191 | 1191 | ||
1192 | recd = be32_to_cpu(tpm_cmd.params.getrandom_out.rng_data_len); | 1192 | recd = be32_to_cpu(tpm_cmd.params.getrandom_out.rng_data_len); |
1193 | if (recd > num_bytes) { | ||
1194 | total = -EFAULT; | ||
1195 | break; | ||
1196 | } | ||
1193 | 1197 | ||
1194 | rlength = be32_to_cpu(tpm_cmd.header.out.length); | 1198 | rlength = be32_to_cpu(tpm_cmd.header.out.length); |
1195 | if (rlength < offsetof(struct tpm_getrandom_out, rng_data) + | 1199 | if (rlength < offsetof(struct tpm_getrandom_out, rng_data) + |
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index c17e75348a99..a700f8f9ead7 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c | |||
@@ -683,6 +683,10 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip, | |||
683 | if (!rc) { | 683 | if (!rc) { |
684 | data_len = be16_to_cpup( | 684 | data_len = be16_to_cpup( |
685 | (__be16 *) &buf.data[TPM_HEADER_SIZE + 4]); | 685 | (__be16 *) &buf.data[TPM_HEADER_SIZE + 4]); |
686 | if (data_len < MIN_KEY_SIZE || data_len > MAX_KEY_SIZE + 1) { | ||
687 | rc = -EFAULT; | ||
688 | goto out; | ||
689 | } | ||
686 | 690 | ||
687 | rlength = be32_to_cpu(((struct tpm2_cmd *)&buf) | 691 | rlength = be32_to_cpu(((struct tpm2_cmd *)&buf) |
688 | ->header.out.length); | 692 | ->header.out.length); |
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c index c1dd39eaaeeb..6116cd05e228 100644 --- a/drivers/char/tpm/tpm_i2c_infineon.c +++ b/drivers/char/tpm/tpm_i2c_infineon.c | |||
@@ -473,7 +473,8 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count) | |||
473 | static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count) | 473 | static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count) |
474 | { | 474 | { |
475 | int size = 0; | 475 | int size = 0; |
476 | int expected, status; | 476 | int status; |
477 | u32 expected; | ||
477 | 478 | ||
478 | if (count < TPM_HEADER_SIZE) { | 479 | if (count < TPM_HEADER_SIZE) { |
479 | size = -EIO; | 480 | size = -EIO; |
@@ -488,7 +489,7 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count) | |||
488 | } | 489 | } |
489 | 490 | ||
490 | expected = be32_to_cpu(*(__be32 *)(buf + 2)); | 491 | expected = be32_to_cpu(*(__be32 *)(buf + 2)); |
491 | if ((size_t) expected > count) { | 492 | if (((size_t) expected > count) || (expected < TPM_HEADER_SIZE)) { |
492 | size = -EIO; | 493 | size = -EIO; |
493 | goto out; | 494 | goto out; |
494 | } | 495 | } |
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c index c6428771841f..caa86b19c76d 100644 --- a/drivers/char/tpm/tpm_i2c_nuvoton.c +++ b/drivers/char/tpm/tpm_i2c_nuvoton.c | |||
@@ -281,7 +281,11 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) | |||
281 | struct device *dev = chip->dev.parent; | 281 | struct device *dev = chip->dev.parent; |
282 | struct i2c_client *client = to_i2c_client(dev); | 282 | struct i2c_client *client = to_i2c_client(dev); |
283 | s32 rc; | 283 | s32 rc; |
284 | int expected, status, burst_count, retries, size = 0; | 284 | int status; |
285 | int burst_count; | ||
286 | int retries; | ||
287 | int size = 0; | ||
288 | u32 expected; | ||
285 | 289 | ||
286 | if (count < TPM_HEADER_SIZE) { | 290 | if (count < TPM_HEADER_SIZE) { |
287 | i2c_nuvoton_ready(chip); /* return to idle */ | 291 | i2c_nuvoton_ready(chip); /* return to idle */ |
@@ -323,7 +327,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) | |||
323 | * to machine native | 327 | * to machine native |
324 | */ | 328 | */ |
325 | expected = be32_to_cpu(*(__be32 *) (buf + 2)); | 329 | expected = be32_to_cpu(*(__be32 *) (buf + 2)); |
326 | if (expected > count) { | 330 | if (expected > count || expected < size) { |
327 | dev_err(dev, "%s() expected > count\n", __func__); | 331 | dev_err(dev, "%s() expected > count\n", __func__); |
328 | size = -EIO; | 332 | size = -EIO; |
329 | continue; | 333 | continue; |
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index 183a5f54d875..da074e3db19b 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c | |||
@@ -270,7 +270,8 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) | |||
270 | { | 270 | { |
271 | struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); | 271 | struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); |
272 | int size = 0; | 272 | int size = 0; |
273 | int expected, status; | 273 | int status; |
274 | u32 expected; | ||
274 | 275 | ||
275 | if (count < TPM_HEADER_SIZE) { | 276 | if (count < TPM_HEADER_SIZE) { |
276 | size = -EIO; | 277 | size = -EIO; |
@@ -285,7 +286,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) | |||
285 | } | 286 | } |
286 | 287 | ||
287 | expected = be32_to_cpu(*(__be32 *) (buf + 2)); | 288 | expected = be32_to_cpu(*(__be32 *) (buf + 2)); |
288 | if (expected > count) { | 289 | if (expected > count || expected < TPM_HEADER_SIZE) { |
289 | size = -EIO; | 290 | size = -EIO; |
290 | goto out; | 291 | goto out; |
291 | } | 292 | } |
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index b3b4ed9b6874..d2e5382821a4 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig | |||
@@ -386,6 +386,7 @@ config ATMEL_PIT | |||
386 | 386 | ||
387 | config ATMEL_ST | 387 | config ATMEL_ST |
388 | bool "Atmel ST timer support" if COMPILE_TEST | 388 | bool "Atmel ST timer support" if COMPILE_TEST |
389 | depends on HAS_IOMEM | ||
389 | select TIMER_OF | 390 | select TIMER_OF |
390 | select MFD_SYSCON | 391 | select MFD_SYSCON |
391 | help | 392 | help |
diff --git a/drivers/clocksource/arc_timer.c b/drivers/clocksource/arc_timer.c index 4927355f9cbe..471b428d8034 100644 --- a/drivers/clocksource/arc_timer.c +++ b/drivers/clocksource/arc_timer.c | |||
@@ -251,9 +251,14 @@ static irqreturn_t timer_irq_handler(int irq, void *dev_id) | |||
251 | int irq_reenable = clockevent_state_periodic(evt); | 251 | int irq_reenable = clockevent_state_periodic(evt); |
252 | 252 | ||
253 | /* | 253 | /* |
254 | * Any write to CTRL reg ACks the interrupt, we rewrite the | 254 | * 1. ACK the interrupt |
255 | * Count when [N]ot [H]alted bit. | 255 | * - For ARC700, any write to CTRL reg ACKs it, so just rewrite |
256 | * And re-arm it if perioid by [I]nterrupt [E]nable bit | 256 | * Count when [N]ot [H]alted bit. |
257 | * - For HS3x, it is a bit subtle. On taken count-down interrupt, | ||
258 | * IP bit [3] is set, which needs to be cleared for ACK'ing. | ||
259 | * The write below can only update the other two bits, hence | ||
260 | * explicitly clears IP bit | ||
261 | * 2. Re-arm interrupt if periodic by writing to IE bit [0] | ||
257 | */ | 262 | */ |
258 | write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH); | 263 | write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH); |
259 | 264 | ||
diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/fsl_ftm_timer.c index 3ee7e6fea621..846d18daf893 100644 --- a/drivers/clocksource/fsl_ftm_timer.c +++ b/drivers/clocksource/fsl_ftm_timer.c | |||
@@ -281,7 +281,7 @@ static int __init __ftm_clk_init(struct device_node *np, char *cnt_name, | |||
281 | 281 | ||
282 | static unsigned long __init ftm_clk_init(struct device_node *np) | 282 | static unsigned long __init ftm_clk_init(struct device_node *np) |
283 | { | 283 | { |
284 | unsigned long freq; | 284 | long freq; |
285 | 285 | ||
286 | freq = __ftm_clk_init(np, "ftm-evt-counter-en", "ftm-evt"); | 286 | freq = __ftm_clk_init(np, "ftm-evt-counter-en", "ftm-evt"); |
287 | if (freq <= 0) | 287 | if (freq <= 0) |
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c index 65e18c86d9b9..986b6796b631 100644 --- a/drivers/clocksource/mips-gic-timer.c +++ b/drivers/clocksource/mips-gic-timer.c | |||
@@ -166,7 +166,7 @@ static int __init __gic_clocksource_init(void) | |||
166 | 166 | ||
167 | /* Set clocksource mask. */ | 167 | /* Set clocksource mask. */ |
168 | count_width = read_gic_config() & GIC_CONFIG_COUNTBITS; | 168 | count_width = read_gic_config() & GIC_CONFIG_COUNTBITS; |
169 | count_width >>= __fls(GIC_CONFIG_COUNTBITS); | 169 | count_width >>= __ffs(GIC_CONFIG_COUNTBITS); |
170 | count_width *= 4; | 170 | count_width *= 4; |
171 | count_width += 32; | 171 | count_width += 32; |
172 | gic_clocksource.mask = CLOCKSOURCE_MASK(count_width); | 172 | gic_clocksource.mask = CLOCKSOURCE_MASK(count_width); |
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 3a88e33b0cfe..fb586e09682d 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm | |||
@@ -44,10 +44,10 @@ config ARM_DT_BL_CPUFREQ | |||
44 | 44 | ||
45 | config ARM_SCPI_CPUFREQ | 45 | config ARM_SCPI_CPUFREQ |
46 | tristate "SCPI based CPUfreq driver" | 46 | tristate "SCPI based CPUfreq driver" |
47 | depends on ARM_BIG_LITTLE_CPUFREQ && ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI | 47 | depends on ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI |
48 | help | 48 | help |
49 | This adds the CPUfreq driver support for ARM big.LITTLE platforms | 49 | This adds the CPUfreq driver support for ARM platforms using SCPI |
50 | using SCPI protocol for CPU power management. | 50 | protocol for CPU power management. |
51 | 51 | ||
52 | This driver uses SCPI Message Protocol driver to interact with the | 52 | This driver uses SCPI Message Protocol driver to interact with the |
53 | firmware providing the CPU DVFS functionality. | 53 | firmware providing the CPU DVFS functionality. |
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c index 7b596fa38ad2..6bebc1f9f55a 100644 --- a/drivers/cpufreq/s3c24xx-cpufreq.c +++ b/drivers/cpufreq/s3c24xx-cpufreq.c | |||
@@ -351,7 +351,13 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name) | |||
351 | static int s3c_cpufreq_init(struct cpufreq_policy *policy) | 351 | static int s3c_cpufreq_init(struct cpufreq_policy *policy) |
352 | { | 352 | { |
353 | policy->clk = clk_arm; | 353 | policy->clk = clk_arm; |
354 | return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency); | 354 | |
355 | policy->cpuinfo.transition_latency = cpu_cur.info->latency; | ||
356 | |||
357 | if (ftab) | ||
358 | return cpufreq_table_validate_and_show(policy, ftab); | ||
359 | |||
360 | return 0; | ||
355 | } | 361 | } |
356 | 362 | ||
357 | static int __init s3c_cpufreq_initclks(void) | 363 | static int __init s3c_cpufreq_initclks(void) |
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c index c32a833e1b00..d300a163945f 100644 --- a/drivers/cpufreq/scpi-cpufreq.c +++ b/drivers/cpufreq/scpi-cpufreq.c | |||
@@ -51,15 +51,23 @@ static unsigned int scpi_cpufreq_get_rate(unsigned int cpu) | |||
51 | static int | 51 | static int |
52 | scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index) | 52 | scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index) |
53 | { | 53 | { |
54 | unsigned long freq = policy->freq_table[index].frequency; | ||
54 | struct scpi_data *priv = policy->driver_data; | 55 | struct scpi_data *priv = policy->driver_data; |
55 | u64 rate = policy->freq_table[index].frequency * 1000; | 56 | u64 rate = freq * 1000; |
56 | int ret; | 57 | int ret; |
57 | 58 | ||
58 | ret = clk_set_rate(priv->clk, rate); | 59 | ret = clk_set_rate(priv->clk, rate); |
59 | if (!ret && (clk_get_rate(priv->clk) != rate)) | ||
60 | ret = -EIO; | ||
61 | 60 | ||
62 | return ret; | 61 | if (ret) |
62 | return ret; | ||
63 | |||
64 | if (clk_get_rate(priv->clk) != rate) | ||
65 | return -EIO; | ||
66 | |||
67 | arch_set_freq_scale(policy->related_cpus, freq, | ||
68 | policy->cpuinfo.max_freq); | ||
69 | |||
70 | return 0; | ||
63 | } | 71 | } |
64 | 72 | ||
65 | static int | 73 | static int |
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index fcfa5b1eae61..b3afb6cc9d72 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c | |||
@@ -211,7 +211,7 @@ static int __sev_platform_shutdown_locked(int *error) | |||
211 | { | 211 | { |
212 | int ret; | 212 | int ret; |
213 | 213 | ||
214 | ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, 0, error); | 214 | ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error); |
215 | if (ret) | 215 | if (ret) |
216 | return ret; | 216 | return ret; |
217 | 217 | ||
@@ -271,7 +271,7 @@ static int sev_ioctl_do_reset(struct sev_issue_cmd *argp) | |||
271 | return rc; | 271 | return rc; |
272 | } | 272 | } |
273 | 273 | ||
274 | return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, 0, &argp->error); | 274 | return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, NULL, &argp->error); |
275 | } | 275 | } |
276 | 276 | ||
277 | static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp) | 277 | static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp) |
@@ -299,7 +299,7 @@ static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp) | |||
299 | return rc; | 299 | return rc; |
300 | } | 300 | } |
301 | 301 | ||
302 | return __sev_do_cmd_locked(cmd, 0, &argp->error); | 302 | return __sev_do_cmd_locked(cmd, NULL, &argp->error); |
303 | } | 303 | } |
304 | 304 | ||
305 | static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp) | 305 | static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp) |
@@ -624,7 +624,7 @@ EXPORT_SYMBOL_GPL(sev_guest_decommission); | |||
624 | 624 | ||
625 | int sev_guest_df_flush(int *error) | 625 | int sev_guest_df_flush(int *error) |
626 | { | 626 | { |
627 | return sev_do_cmd(SEV_CMD_DF_FLUSH, 0, error); | 627 | return sev_do_cmd(SEV_CMD_DF_FLUSH, NULL, error); |
628 | } | 628 | } |
629 | EXPORT_SYMBOL_GPL(sev_guest_df_flush); | 629 | EXPORT_SYMBOL_GPL(sev_guest_df_flush); |
630 | 630 | ||
diff --git a/drivers/dax/super.c b/drivers/dax/super.c index 473af694ad1c..ecdc292aa4e4 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c | |||
@@ -246,12 +246,6 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages, | |||
246 | { | 246 | { |
247 | long avail; | 247 | long avail; |
248 | 248 | ||
249 | /* | ||
250 | * The device driver is allowed to sleep, in order to make the | ||
251 | * memory directly accessible. | ||
252 | */ | ||
253 | might_sleep(); | ||
254 | |||
255 | if (!dax_dev) | 249 | if (!dax_dev) |
256 | return -EOPNOTSUPP; | 250 | return -EOPNOTSUPP; |
257 | 251 | ||
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c index f652a0e0f5a2..3548caa9e933 100644 --- a/drivers/dma/mv_xor_v2.c +++ b/drivers/dma/mv_xor_v2.c | |||
@@ -163,6 +163,7 @@ struct mv_xor_v2_device { | |||
163 | void __iomem *dma_base; | 163 | void __iomem *dma_base; |
164 | void __iomem *glob_base; | 164 | void __iomem *glob_base; |
165 | struct clk *clk; | 165 | struct clk *clk; |
166 | struct clk *reg_clk; | ||
166 | struct tasklet_struct irq_tasklet; | 167 | struct tasklet_struct irq_tasklet; |
167 | struct list_head free_sw_desc; | 168 | struct list_head free_sw_desc; |
168 | struct dma_device dmadev; | 169 | struct dma_device dmadev; |
@@ -749,13 +750,26 @@ static int mv_xor_v2_probe(struct platform_device *pdev) | |||
749 | if (ret) | 750 | if (ret) |
750 | return ret; | 751 | return ret; |
751 | 752 | ||
753 | xor_dev->reg_clk = devm_clk_get(&pdev->dev, "reg"); | ||
754 | if (PTR_ERR(xor_dev->reg_clk) != -ENOENT) { | ||
755 | if (!IS_ERR(xor_dev->reg_clk)) { | ||
756 | ret = clk_prepare_enable(xor_dev->reg_clk); | ||
757 | if (ret) | ||
758 | return ret; | ||
759 | } else { | ||
760 | return PTR_ERR(xor_dev->reg_clk); | ||
761 | } | ||
762 | } | ||
763 | |||
752 | xor_dev->clk = devm_clk_get(&pdev->dev, NULL); | 764 | xor_dev->clk = devm_clk_get(&pdev->dev, NULL); |
753 | if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) | 765 | if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) { |
754 | return -EPROBE_DEFER; | 766 | ret = EPROBE_DEFER; |
767 | goto disable_reg_clk; | ||
768 | } | ||
755 | if (!IS_ERR(xor_dev->clk)) { | 769 | if (!IS_ERR(xor_dev->clk)) { |
756 | ret = clk_prepare_enable(xor_dev->clk); | 770 | ret = clk_prepare_enable(xor_dev->clk); |
757 | if (ret) | 771 | if (ret) |
758 | return ret; | 772 | goto disable_reg_clk; |
759 | } | 773 | } |
760 | 774 | ||
761 | ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1, | 775 | ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1, |
@@ -866,8 +880,9 @@ free_hw_desq: | |||
866 | free_msi_irqs: | 880 | free_msi_irqs: |
867 | platform_msi_domain_free_irqs(&pdev->dev); | 881 | platform_msi_domain_free_irqs(&pdev->dev); |
868 | disable_clk: | 882 | disable_clk: |
869 | if (!IS_ERR(xor_dev->clk)) | 883 | clk_disable_unprepare(xor_dev->clk); |
870 | clk_disable_unprepare(xor_dev->clk); | 884 | disable_reg_clk: |
885 | clk_disable_unprepare(xor_dev->reg_clk); | ||
871 | return ret; | 886 | return ret; |
872 | } | 887 | } |
873 | 888 | ||
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index e3ff162c03fc..d0cacdb0713e 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c | |||
@@ -917,7 +917,7 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl, | |||
917 | 917 | ||
918 | rcar_dmac_chan_configure_desc(chan, desc); | 918 | rcar_dmac_chan_configure_desc(chan, desc); |
919 | 919 | ||
920 | max_chunk_size = (RCAR_DMATCR_MASK + 1) << desc->xfer_shift; | 920 | max_chunk_size = RCAR_DMATCR_MASK << desc->xfer_shift; |
921 | 921 | ||
922 | /* | 922 | /* |
923 | * Allocate and fill the transfer chunk descriptors. We own the only | 923 | * Allocate and fill the transfer chunk descriptors. We own the only |
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index f34430f99fd8..872100215ca0 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c | |||
@@ -279,7 +279,7 @@ static const u32 correrrthrsld[] = { | |||
279 | * sbridge structs | 279 | * sbridge structs |
280 | */ | 280 | */ |
281 | 281 | ||
282 | #define NUM_CHANNELS 4 /* Max channels per MC */ | 282 | #define NUM_CHANNELS 6 /* Max channels per MC */ |
283 | #define MAX_DIMMS 3 /* Max DIMMS per channel */ | 283 | #define MAX_DIMMS 3 /* Max DIMMS per channel */ |
284 | #define KNL_MAX_CHAS 38 /* KNL max num. of Cache Home Agents */ | 284 | #define KNL_MAX_CHAS 38 /* KNL max num. of Cache Home Agents */ |
285 | #define KNL_MAX_CHANNELS 6 /* KNL max num. of PCI channels */ | 285 | #define KNL_MAX_CHANNELS 6 /* KNL max num. of PCI channels */ |
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c index c16600f30611..0bdea60c65dd 100644 --- a/drivers/firmware/dcdbas.c +++ b/drivers/firmware/dcdbas.c | |||
@@ -639,7 +639,7 @@ static void __exit dcdbas_exit(void) | |||
639 | platform_driver_unregister(&dcdbas_driver); | 639 | platform_driver_unregister(&dcdbas_driver); |
640 | } | 640 | } |
641 | 641 | ||
642 | module_init(dcdbas_init); | 642 | subsys_initcall_sync(dcdbas_init); |
643 | module_exit(dcdbas_exit); | 643 | module_exit(dcdbas_exit); |
644 | 644 | ||
645 | MODULE_DESCRIPTION(DRIVER_DESCRIPTION " (version " DRIVER_VERSION ")"); | 645 | MODULE_DESCRIPTION(DRIVER_DESCRIPTION " (version " DRIVER_VERSION ")"); |
diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c index da661bf8cb96..13c1edd37e96 100644 --- a/drivers/firmware/efi/libstub/tpm.c +++ b/drivers/firmware/efi/libstub/tpm.c | |||
@@ -68,11 +68,11 @@ void efi_retrieve_tpm2_eventlog_1_2(efi_system_table_t *sys_table_arg) | |||
68 | efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID; | 68 | efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID; |
69 | efi_status_t status; | 69 | efi_status_t status; |
70 | efi_physical_addr_t log_location, log_last_entry; | 70 | efi_physical_addr_t log_location, log_last_entry; |
71 | struct linux_efi_tpm_eventlog *log_tbl; | 71 | struct linux_efi_tpm_eventlog *log_tbl = NULL; |
72 | unsigned long first_entry_addr, last_entry_addr; | 72 | unsigned long first_entry_addr, last_entry_addr; |
73 | size_t log_size, last_entry_size; | 73 | size_t log_size, last_entry_size; |
74 | efi_bool_t truncated; | 74 | efi_bool_t truncated; |
75 | void *tcg2_protocol; | 75 | void *tcg2_protocol = NULL; |
76 | 76 | ||
77 | status = efi_call_early(locate_protocol, &tcg2_guid, NULL, | 77 | status = efi_call_early(locate_protocol, &tcg2_guid, NULL, |
78 | &tcg2_protocol); | 78 | &tcg2_protocol); |
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c index e76de57dd617..ebaea8b1594b 100644 --- a/drivers/gpio/gpio-rcar.c +++ b/drivers/gpio/gpio-rcar.c | |||
@@ -14,7 +14,6 @@ | |||
14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <linux/clk.h> | ||
18 | #include <linux/err.h> | 17 | #include <linux/err.h> |
19 | #include <linux/gpio.h> | 18 | #include <linux/gpio.h> |
20 | #include <linux/init.h> | 19 | #include <linux/init.h> |
@@ -37,10 +36,9 @@ struct gpio_rcar_priv { | |||
37 | struct platform_device *pdev; | 36 | struct platform_device *pdev; |
38 | struct gpio_chip gpio_chip; | 37 | struct gpio_chip gpio_chip; |
39 | struct irq_chip irq_chip; | 38 | struct irq_chip irq_chip; |
40 | struct clk *clk; | ||
41 | unsigned int irq_parent; | 39 | unsigned int irq_parent; |
40 | atomic_t wakeup_path; | ||
42 | bool has_both_edge_trigger; | 41 | bool has_both_edge_trigger; |
43 | bool needs_clk; | ||
44 | }; | 42 | }; |
45 | 43 | ||
46 | #define IOINTSEL 0x00 /* General IO/Interrupt Switching Register */ | 44 | #define IOINTSEL 0x00 /* General IO/Interrupt Switching Register */ |
@@ -186,13 +184,10 @@ static int gpio_rcar_irq_set_wake(struct irq_data *d, unsigned int on) | |||
186 | } | 184 | } |
187 | } | 185 | } |
188 | 186 | ||
189 | if (!p->clk) | ||
190 | return 0; | ||
191 | |||
192 | if (on) | 187 | if (on) |
193 | clk_enable(p->clk); | 188 | atomic_inc(&p->wakeup_path); |
194 | else | 189 | else |
195 | clk_disable(p->clk); | 190 | atomic_dec(&p->wakeup_path); |
196 | 191 | ||
197 | return 0; | 192 | return 0; |
198 | } | 193 | } |
@@ -330,17 +325,14 @@ static int gpio_rcar_direction_output(struct gpio_chip *chip, unsigned offset, | |||
330 | 325 | ||
331 | struct gpio_rcar_info { | 326 | struct gpio_rcar_info { |
332 | bool has_both_edge_trigger; | 327 | bool has_both_edge_trigger; |
333 | bool needs_clk; | ||
334 | }; | 328 | }; |
335 | 329 | ||
336 | static const struct gpio_rcar_info gpio_rcar_info_gen1 = { | 330 | static const struct gpio_rcar_info gpio_rcar_info_gen1 = { |
337 | .has_both_edge_trigger = false, | 331 | .has_both_edge_trigger = false, |
338 | .needs_clk = false, | ||
339 | }; | 332 | }; |
340 | 333 | ||
341 | static const struct gpio_rcar_info gpio_rcar_info_gen2 = { | 334 | static const struct gpio_rcar_info gpio_rcar_info_gen2 = { |
342 | .has_both_edge_trigger = true, | 335 | .has_both_edge_trigger = true, |
343 | .needs_clk = true, | ||
344 | }; | 336 | }; |
345 | 337 | ||
346 | static const struct of_device_id gpio_rcar_of_table[] = { | 338 | static const struct of_device_id gpio_rcar_of_table[] = { |
@@ -403,7 +395,6 @@ static int gpio_rcar_parse_dt(struct gpio_rcar_priv *p, unsigned int *npins) | |||
403 | ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &args); | 395 | ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &args); |
404 | *npins = ret == 0 ? args.args[2] : RCAR_MAX_GPIO_PER_BANK; | 396 | *npins = ret == 0 ? args.args[2] : RCAR_MAX_GPIO_PER_BANK; |
405 | p->has_both_edge_trigger = info->has_both_edge_trigger; | 397 | p->has_both_edge_trigger = info->has_both_edge_trigger; |
406 | p->needs_clk = info->needs_clk; | ||
407 | 398 | ||
408 | if (*npins == 0 || *npins > RCAR_MAX_GPIO_PER_BANK) { | 399 | if (*npins == 0 || *npins > RCAR_MAX_GPIO_PER_BANK) { |
409 | dev_warn(&p->pdev->dev, | 400 | dev_warn(&p->pdev->dev, |
@@ -440,16 +431,6 @@ static int gpio_rcar_probe(struct platform_device *pdev) | |||
440 | 431 | ||
441 | platform_set_drvdata(pdev, p); | 432 | platform_set_drvdata(pdev, p); |
442 | 433 | ||
443 | p->clk = devm_clk_get(dev, NULL); | ||
444 | if (IS_ERR(p->clk)) { | ||
445 | if (p->needs_clk) { | ||
446 | dev_err(dev, "unable to get clock\n"); | ||
447 | ret = PTR_ERR(p->clk); | ||
448 | goto err0; | ||
449 | } | ||
450 | p->clk = NULL; | ||
451 | } | ||
452 | |||
453 | pm_runtime_enable(dev); | 434 | pm_runtime_enable(dev); |
454 | 435 | ||
455 | irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | 436 | irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); |
@@ -531,11 +512,24 @@ static int gpio_rcar_remove(struct platform_device *pdev) | |||
531 | return 0; | 512 | return 0; |
532 | } | 513 | } |
533 | 514 | ||
515 | static int __maybe_unused gpio_rcar_suspend(struct device *dev) | ||
516 | { | ||
517 | struct gpio_rcar_priv *p = dev_get_drvdata(dev); | ||
518 | |||
519 | if (atomic_read(&p->wakeup_path)) | ||
520 | device_set_wakeup_path(dev); | ||
521 | |||
522 | return 0; | ||
523 | } | ||
524 | |||
525 | static SIMPLE_DEV_PM_OPS(gpio_rcar_pm_ops, gpio_rcar_suspend, NULL); | ||
526 | |||
534 | static struct platform_driver gpio_rcar_device_driver = { | 527 | static struct platform_driver gpio_rcar_device_driver = { |
535 | .probe = gpio_rcar_probe, | 528 | .probe = gpio_rcar_probe, |
536 | .remove = gpio_rcar_remove, | 529 | .remove = gpio_rcar_remove, |
537 | .driver = { | 530 | .driver = { |
538 | .name = "gpio_rcar", | 531 | .name = "gpio_rcar", |
532 | .pm = &gpio_rcar_pm_ops, | ||
539 | .of_match_table = of_match_ptr(gpio_rcar_of_table), | 533 | .of_match_table = of_match_ptr(gpio_rcar_of_table), |
540 | } | 534 | } |
541 | }; | 535 | }; |
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index 564bb7a31da4..84e5a9df2344 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c | |||
@@ -241,6 +241,19 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id, | |||
241 | 241 | ||
242 | desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx, | 242 | desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx, |
243 | &of_flags); | 243 | &of_flags); |
244 | /* | ||
245 | * -EPROBE_DEFER in our case means that we found a | ||
246 | * valid GPIO property, but no controller has been | ||
247 | * registered so far. | ||
248 | * | ||
249 | * This means we don't need to look any further for | ||
250 | * alternate name conventions, and we should really | ||
251 | * preserve the return code for our user to be able to | ||
252 | * retry probing later. | ||
253 | */ | ||
254 | if (IS_ERR(desc) && PTR_ERR(desc) == -EPROBE_DEFER) | ||
255 | return desc; | ||
256 | |||
244 | if (!IS_ERR(desc) || (PTR_ERR(desc) != -ENOENT)) | 257 | if (!IS_ERR(desc) || (PTR_ERR(desc) != -ENOENT)) |
245 | break; | 258 | break; |
246 | } | 259 | } |
@@ -250,7 +263,7 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id, | |||
250 | desc = of_find_spi_gpio(dev, con_id, &of_flags); | 263 | desc = of_find_spi_gpio(dev, con_id, &of_flags); |
251 | 264 | ||
252 | /* Special handling for regulator GPIOs if used */ | 265 | /* Special handling for regulator GPIOs if used */ |
253 | if (IS_ERR(desc)) | 266 | if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER) |
254 | desc = of_find_regulator_gpio(dev, con_id, &of_flags); | 267 | desc = of_find_regulator_gpio(dev, con_id, &of_flags); |
255 | 268 | ||
256 | if (IS_ERR(desc)) | 269 | if (IS_ERR(desc)) |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index d5a2eefd6c3e..74edba18b159 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h | |||
@@ -1156,7 +1156,7 @@ static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p, | |||
1156 | /* | 1156 | /* |
1157 | * Writeback | 1157 | * Writeback |
1158 | */ | 1158 | */ |
1159 | #define AMDGPU_MAX_WB 512 /* Reserve at most 512 WB slots for amdgpu-owned rings. */ | 1159 | #define AMDGPU_MAX_WB 128 /* Reserve at most 128 WB slots for amdgpu-owned rings. */ |
1160 | 1160 | ||
1161 | struct amdgpu_wb { | 1161 | struct amdgpu_wb { |
1162 | struct amdgpu_bo *wb_obj; | 1162 | struct amdgpu_bo *wb_obj; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 57afad79f55d..8fa850a070e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | |||
@@ -540,6 +540,9 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev, | |||
540 | size_t size; | 540 | size_t size; |
541 | u32 retry = 3; | 541 | u32 retry = 3; |
542 | 542 | ||
543 | if (amdgpu_acpi_pcie_notify_device_ready(adev)) | ||
544 | return -EINVAL; | ||
545 | |||
543 | /* Get the device handle */ | 546 | /* Get the device handle */ |
544 | handle = ACPI_HANDLE(&adev->pdev->dev); | 547 | handle = ACPI_HANDLE(&adev->pdev->dev); |
545 | if (!handle) | 548 | if (!handle) |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 74d2efaec52f..7a073ac5f9c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | |||
@@ -69,25 +69,18 @@ void amdgpu_connector_hotplug(struct drm_connector *connector) | |||
69 | /* don't do anything if sink is not display port, i.e., | 69 | /* don't do anything if sink is not display port, i.e., |
70 | * passive dp->(dvi|hdmi) adaptor | 70 | * passive dp->(dvi|hdmi) adaptor |
71 | */ | 71 | */ |
72 | if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { | 72 | if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT && |
73 | int saved_dpms = connector->dpms; | 73 | amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd) && |
74 | /* Only turn off the display if it's physically disconnected */ | 74 | amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) { |
75 | if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) { | 75 | /* Don't start link training before we have the DPCD */ |
76 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); | 76 | if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector)) |
77 | } else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) { | 77 | return; |
78 | /* Don't try to start link training before we | 78 | |
79 | * have the dpcd */ | 79 | /* Turn the connector off and back on immediately, which |
80 | if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector)) | 80 | * will trigger link training |
81 | return; | 81 | */ |
82 | 82 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); | |
83 | /* set it to OFF so that drm_helper_connector_dpms() | 83 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); |
84 | * won't return immediately since the current state | ||
85 | * is ON at this point. | ||
86 | */ | ||
87 | connector->dpms = DRM_MODE_DPMS_OFF; | ||
88 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); | ||
89 | } | ||
90 | connector->dpms = saved_dpms; | ||
91 | } | 84 | } |
92 | } | 85 | } |
93 | } | 86 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 00a50cc5ec9a..af1b879a9ee9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | |||
@@ -492,7 +492,7 @@ static int amdgpu_device_wb_init(struct amdgpu_device *adev) | |||
492 | memset(&adev->wb.used, 0, sizeof(adev->wb.used)); | 492 | memset(&adev->wb.used, 0, sizeof(adev->wb.used)); |
493 | 493 | ||
494 | /* clear wb memory */ | 494 | /* clear wb memory */ |
495 | memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t)); | 495 | memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8); |
496 | } | 496 | } |
497 | 497 | ||
498 | return 0; | 498 | return 0; |
@@ -530,8 +530,9 @@ int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb) | |||
530 | */ | 530 | */ |
531 | void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb) | 531 | void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb) |
532 | { | 532 | { |
533 | wb >>= 3; | ||
533 | if (wb < adev->wb.num_wb) | 534 | if (wb < adev->wb.num_wb) |
534 | __clear_bit(wb >> 3, adev->wb.used); | 535 | __clear_bit(wb, adev->wb.used); |
535 | } | 536 | } |
536 | 537 | ||
537 | /** | 538 | /** |
@@ -1455,11 +1456,6 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) | |||
1455 | for (i = adev->num_ip_blocks - 1; i >= 0; i--) { | 1456 | for (i = adev->num_ip_blocks - 1; i >= 0; i--) { |
1456 | if (!adev->ip_blocks[i].status.hw) | 1457 | if (!adev->ip_blocks[i].status.hw) |
1457 | continue; | 1458 | continue; |
1458 | if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { | ||
1459 | amdgpu_free_static_csa(adev); | ||
1460 | amdgpu_device_wb_fini(adev); | ||
1461 | amdgpu_device_vram_scratch_fini(adev); | ||
1462 | } | ||
1463 | 1459 | ||
1464 | if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && | 1460 | if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && |
1465 | adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) { | 1461 | adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) { |
@@ -1486,6 +1482,13 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) | |||
1486 | for (i = adev->num_ip_blocks - 1; i >= 0; i--) { | 1482 | for (i = adev->num_ip_blocks - 1; i >= 0; i--) { |
1487 | if (!adev->ip_blocks[i].status.sw) | 1483 | if (!adev->ip_blocks[i].status.sw) |
1488 | continue; | 1484 | continue; |
1485 | |||
1486 | if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { | ||
1487 | amdgpu_free_static_csa(adev); | ||
1488 | amdgpu_device_wb_fini(adev); | ||
1489 | amdgpu_device_vram_scratch_fini(adev); | ||
1490 | } | ||
1491 | |||
1489 | r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev); | 1492 | r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev); |
1490 | /* XXX handle errors */ | 1493 | /* XXX handle errors */ |
1491 | if (r) { | 1494 | if (r) { |
@@ -2284,14 +2287,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) | |||
2284 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); | 2287 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); |
2285 | } | 2288 | } |
2286 | drm_modeset_unlock_all(dev); | 2289 | drm_modeset_unlock_all(dev); |
2287 | } else { | ||
2288 | /* | ||
2289 | * There is no equivalent atomic helper to turn on | ||
2290 | * display, so we defined our own function for this, | ||
2291 | * once suspend resume is supported by the atomic | ||
2292 | * framework this will be reworked | ||
2293 | */ | ||
2294 | amdgpu_dm_display_resume(adev); | ||
2295 | } | 2290 | } |
2296 | } | 2291 | } |
2297 | 2292 | ||
@@ -2726,7 +2721,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, | |||
2726 | if (amdgpu_device_has_dc_support(adev)) { | 2721 | if (amdgpu_device_has_dc_support(adev)) { |
2727 | if (drm_atomic_helper_resume(adev->ddev, state)) | 2722 | if (drm_atomic_helper_resume(adev->ddev, state)) |
2728 | dev_info(adev->dev, "drm resume failed:%d\n", r); | 2723 | dev_info(adev->dev, "drm resume failed:%d\n", r); |
2729 | amdgpu_dm_display_resume(adev); | ||
2730 | } else { | 2724 | } else { |
2731 | drm_helper_resume_force_mode(adev->ddev); | 2725 | drm_helper_resume_force_mode(adev->ddev); |
2732 | } | 2726 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index e48b4ec88c8c..ca6c931dabfa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | |||
@@ -36,8 +36,6 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj) | |||
36 | struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj); | 36 | struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj); |
37 | 37 | ||
38 | if (robj) { | 38 | if (robj) { |
39 | if (robj->gem_base.import_attach) | ||
40 | drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg); | ||
41 | amdgpu_mn_unregister(robj); | 39 | amdgpu_mn_unregister(robj); |
42 | amdgpu_bo_unref(&robj); | 40 | amdgpu_bo_unref(&robj); |
43 | } | 41 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index e14ab34d8262..7c2be32c5aea 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | |||
@@ -75,7 +75,7 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man, | |||
75 | static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man) | 75 | static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man) |
76 | { | 76 | { |
77 | struct amdgpu_gtt_mgr *mgr = man->priv; | 77 | struct amdgpu_gtt_mgr *mgr = man->priv; |
78 | 78 | spin_lock(&mgr->lock); | |
79 | drm_mm_takedown(&mgr->mm); | 79 | drm_mm_takedown(&mgr->mm); |
80 | spin_unlock(&mgr->lock); | 80 | spin_unlock(&mgr->lock); |
81 | kfree(mgr); | 81 | kfree(mgr); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 56bcd59c3399..36483e0d3c97 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | |||
@@ -257,7 +257,8 @@ int amdgpu_irq_init(struct amdgpu_device *adev) | |||
257 | r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq); | 257 | r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq); |
258 | if (r) { | 258 | if (r) { |
259 | adev->irq.installed = false; | 259 | adev->irq.installed = false; |
260 | flush_work(&adev->hotplug_work); | 260 | if (!amdgpu_device_has_dc_support(adev)) |
261 | flush_work(&adev->hotplug_work); | ||
261 | cancel_work_sync(&adev->reset_work); | 262 | cancel_work_sync(&adev->reset_work); |
262 | return r; | 263 | return r; |
263 | } | 264 | } |
@@ -282,7 +283,8 @@ void amdgpu_irq_fini(struct amdgpu_device *adev) | |||
282 | adev->irq.installed = false; | 283 | adev->irq.installed = false; |
283 | if (adev->irq.msi_enabled) | 284 | if (adev->irq.msi_enabled) |
284 | pci_disable_msi(adev->pdev); | 285 | pci_disable_msi(adev->pdev); |
285 | flush_work(&adev->hotplug_work); | 286 | if (!amdgpu_device_has_dc_support(adev)) |
287 | flush_work(&adev->hotplug_work); | ||
286 | cancel_work_sync(&adev->reset_work); | 288 | cancel_work_sync(&adev->reset_work); |
287 | } | 289 | } |
288 | 290 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index 54f06c959340..2264c5c97009 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | |||
@@ -352,6 +352,7 @@ struct amdgpu_mode_info { | |||
352 | u16 firmware_flags; | 352 | u16 firmware_flags; |
353 | /* pointer to backlight encoder */ | 353 | /* pointer to backlight encoder */ |
354 | struct amdgpu_encoder *bl_encoder; | 354 | struct amdgpu_encoder *bl_encoder; |
355 | u8 bl_level; /* saved backlight level */ | ||
355 | struct amdgpu_audio audio; /* audio stuff */ | 356 | struct amdgpu_audio audio; /* audio stuff */ |
356 | int num_crtc; /* number of crtcs */ | 357 | int num_crtc; /* number of crtcs */ |
357 | int num_hpd; /* number of hpd pins */ | 358 | int num_hpd; /* number of hpd pins */ |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 5c4c3e0d527b..1220322c1680 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | |||
@@ -56,6 +56,8 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) | |||
56 | 56 | ||
57 | amdgpu_bo_kunmap(bo); | 57 | amdgpu_bo_kunmap(bo); |
58 | 58 | ||
59 | if (bo->gem_base.import_attach) | ||
60 | drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg); | ||
59 | drm_gem_object_release(&bo->gem_base); | 61 | drm_gem_object_release(&bo->gem_base); |
60 | amdgpu_bo_unref(&bo->parent); | 62 | amdgpu_bo_unref(&bo->parent); |
61 | if (!list_empty(&bo->shadow_list)) { | 63 | if (!list_empty(&bo->shadow_list)) { |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 13044e66dcaf..561d3312af32 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | |||
@@ -481,7 +481,7 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf, | |||
481 | result = 0; | 481 | result = 0; |
482 | 482 | ||
483 | if (*pos < 12) { | 483 | if (*pos < 12) { |
484 | early[0] = amdgpu_ring_get_rptr(ring); | 484 | early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask; |
485 | early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask; | 485 | early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask; |
486 | early[2] = ring->wptr & ring->buf_mask; | 486 | early[2] = ring->wptr & ring->buf_mask; |
487 | for (i = *pos / 4; i < 3 && size; i++) { | 487 | for (i = *pos / 4; i < 3 && size; i++) { |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index b2eae86bf906..5c26a8e806b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | |||
@@ -299,12 +299,15 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev) | |||
299 | 299 | ||
300 | cancel_delayed_work_sync(&adev->uvd.idle_work); | 300 | cancel_delayed_work_sync(&adev->uvd.idle_work); |
301 | 301 | ||
302 | for (i = 0; i < adev->uvd.max_handles; ++i) | 302 | /* only valid for physical mode */ |
303 | if (atomic_read(&adev->uvd.handles[i])) | 303 | if (adev->asic_type < CHIP_POLARIS10) { |
304 | break; | 304 | for (i = 0; i < adev->uvd.max_handles; ++i) |
305 | if (atomic_read(&adev->uvd.handles[i])) | ||
306 | break; | ||
305 | 307 | ||
306 | if (i == AMDGPU_MAX_UVD_HANDLES) | 308 | if (i == adev->uvd.max_handles) |
307 | return 0; | 309 | return 0; |
310 | } | ||
308 | 311 | ||
309 | size = amdgpu_bo_size(adev->uvd.vcpu_bo); | 312 | size = amdgpu_bo_size(adev->uvd.vcpu_bo); |
310 | ptr = adev->uvd.cpu_addr; | 313 | ptr = adev->uvd.cpu_addr; |
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c index 2af26d2da127..d702fb8e3427 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c | |||
@@ -34,7 +34,7 @@ | |||
34 | #include <linux/backlight.h> | 34 | #include <linux/backlight.h> |
35 | #include "bif/bif_4_1_d.h" | 35 | #include "bif/bif_4_1_d.h" |
36 | 36 | ||
37 | static u8 | 37 | u8 |
38 | amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev) | 38 | amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev) |
39 | { | 39 | { |
40 | u8 backlight_level; | 40 | u8 backlight_level; |
@@ -48,7 +48,7 @@ amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev) | |||
48 | return backlight_level; | 48 | return backlight_level; |
49 | } | 49 | } |
50 | 50 | ||
51 | static void | 51 | void |
52 | amdgpu_atombios_encoder_set_backlight_level_to_reg(struct amdgpu_device *adev, | 52 | amdgpu_atombios_encoder_set_backlight_level_to_reg(struct amdgpu_device *adev, |
53 | u8 backlight_level) | 53 | u8 backlight_level) |
54 | { | 54 | { |
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h index 2bdec40515ce..f77cbdef679e 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h | |||
@@ -25,6 +25,11 @@ | |||
25 | #define __ATOMBIOS_ENCODER_H__ | 25 | #define __ATOMBIOS_ENCODER_H__ |
26 | 26 | ||
27 | u8 | 27 | u8 |
28 | amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev); | ||
29 | void | ||
30 | amdgpu_atombios_encoder_set_backlight_level_to_reg(struct amdgpu_device *adev, | ||
31 | u8 backlight_level); | ||
32 | u8 | ||
28 | amdgpu_atombios_encoder_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder); | 33 | amdgpu_atombios_encoder_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder); |
29 | void | 34 | void |
30 | amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder, | 35 | amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder, |
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index f34bc68aadfb..022f303463fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | |||
@@ -2921,6 +2921,11 @@ static int dce_v10_0_hw_fini(void *handle) | |||
2921 | 2921 | ||
2922 | static int dce_v10_0_suspend(void *handle) | 2922 | static int dce_v10_0_suspend(void *handle) |
2923 | { | 2923 | { |
2924 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
2925 | |||
2926 | adev->mode_info.bl_level = | ||
2927 | amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); | ||
2928 | |||
2924 | return dce_v10_0_hw_fini(handle); | 2929 | return dce_v10_0_hw_fini(handle); |
2925 | } | 2930 | } |
2926 | 2931 | ||
@@ -2929,6 +2934,9 @@ static int dce_v10_0_resume(void *handle) | |||
2929 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 2934 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
2930 | int ret; | 2935 | int ret; |
2931 | 2936 | ||
2937 | amdgpu_atombios_encoder_set_backlight_level_to_reg(adev, | ||
2938 | adev->mode_info.bl_level); | ||
2939 | |||
2932 | ret = dce_v10_0_hw_init(handle); | 2940 | ret = dce_v10_0_hw_init(handle); |
2933 | 2941 | ||
2934 | /* turn on the BL */ | 2942 | /* turn on the BL */ |
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 26378bd6aba4..800a9f36ab4f 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | |||
@@ -3047,6 +3047,11 @@ static int dce_v11_0_hw_fini(void *handle) | |||
3047 | 3047 | ||
3048 | static int dce_v11_0_suspend(void *handle) | 3048 | static int dce_v11_0_suspend(void *handle) |
3049 | { | 3049 | { |
3050 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
3051 | |||
3052 | adev->mode_info.bl_level = | ||
3053 | amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); | ||
3054 | |||
3050 | return dce_v11_0_hw_fini(handle); | 3055 | return dce_v11_0_hw_fini(handle); |
3051 | } | 3056 | } |
3052 | 3057 | ||
@@ -3055,6 +3060,9 @@ static int dce_v11_0_resume(void *handle) | |||
3055 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 3060 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
3056 | int ret; | 3061 | int ret; |
3057 | 3062 | ||
3063 | amdgpu_atombios_encoder_set_backlight_level_to_reg(adev, | ||
3064 | adev->mode_info.bl_level); | ||
3065 | |||
3058 | ret = dce_v11_0_hw_init(handle); | 3066 | ret = dce_v11_0_hw_init(handle); |
3059 | 3067 | ||
3060 | /* turn on the BL */ | 3068 | /* turn on the BL */ |
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index bd2c4f727df6..b8368f69ce1f 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | |||
@@ -2787,6 +2787,11 @@ static int dce_v6_0_hw_fini(void *handle) | |||
2787 | 2787 | ||
2788 | static int dce_v6_0_suspend(void *handle) | 2788 | static int dce_v6_0_suspend(void *handle) |
2789 | { | 2789 | { |
2790 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
2791 | |||
2792 | adev->mode_info.bl_level = | ||
2793 | amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); | ||
2794 | |||
2790 | return dce_v6_0_hw_fini(handle); | 2795 | return dce_v6_0_hw_fini(handle); |
2791 | } | 2796 | } |
2792 | 2797 | ||
@@ -2795,6 +2800,9 @@ static int dce_v6_0_resume(void *handle) | |||
2795 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 2800 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
2796 | int ret; | 2801 | int ret; |
2797 | 2802 | ||
2803 | amdgpu_atombios_encoder_set_backlight_level_to_reg(adev, | ||
2804 | adev->mode_info.bl_level); | ||
2805 | |||
2798 | ret = dce_v6_0_hw_init(handle); | 2806 | ret = dce_v6_0_hw_init(handle); |
2799 | 2807 | ||
2800 | /* turn on the BL */ | 2808 | /* turn on the BL */ |
@@ -3093,7 +3101,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev, | |||
3093 | tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK; | 3101 | tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK; |
3094 | WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp); | 3102 | WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp); |
3095 | schedule_work(&adev->hotplug_work); | 3103 | schedule_work(&adev->hotplug_work); |
3096 | DRM_INFO("IH: HPD%d\n", hpd + 1); | 3104 | DRM_DEBUG("IH: HPD%d\n", hpd + 1); |
3097 | } | 3105 | } |
3098 | 3106 | ||
3099 | return 0; | 3107 | return 0; |
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index c008dc030687..012e0a9ae0ff 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | |||
@@ -2819,6 +2819,11 @@ static int dce_v8_0_hw_fini(void *handle) | |||
2819 | 2819 | ||
2820 | static int dce_v8_0_suspend(void *handle) | 2820 | static int dce_v8_0_suspend(void *handle) |
2821 | { | 2821 | { |
2822 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
2823 | |||
2824 | adev->mode_info.bl_level = | ||
2825 | amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); | ||
2826 | |||
2822 | return dce_v8_0_hw_fini(handle); | 2827 | return dce_v8_0_hw_fini(handle); |
2823 | } | 2828 | } |
2824 | 2829 | ||
@@ -2827,6 +2832,9 @@ static int dce_v8_0_resume(void *handle) | |||
2827 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 2832 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
2828 | int ret; | 2833 | int ret; |
2829 | 2834 | ||
2835 | amdgpu_atombios_encoder_set_backlight_level_to_reg(adev, | ||
2836 | adev->mode_info.bl_level); | ||
2837 | |||
2830 | ret = dce_v8_0_hw_init(handle); | 2838 | ret = dce_v8_0_hw_init(handle); |
2831 | 2839 | ||
2832 | /* turn on the BL */ | 2840 | /* turn on the BL */ |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index a066c5eda135..a4309698e76c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | |||
@@ -4384,34 +4384,8 @@ static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev) | |||
4384 | case CHIP_KAVERI: | 4384 | case CHIP_KAVERI: |
4385 | adev->gfx.config.max_shader_engines = 1; | 4385 | adev->gfx.config.max_shader_engines = 1; |
4386 | adev->gfx.config.max_tile_pipes = 4; | 4386 | adev->gfx.config.max_tile_pipes = 4; |
4387 | if ((adev->pdev->device == 0x1304) || | 4387 | adev->gfx.config.max_cu_per_sh = 8; |
4388 | (adev->pdev->device == 0x1305) || | 4388 | adev->gfx.config.max_backends_per_se = 2; |
4389 | (adev->pdev->device == 0x130C) || | ||
4390 | (adev->pdev->device == 0x130F) || | ||
4391 | (adev->pdev->device == 0x1310) || | ||
4392 | (adev->pdev->device == 0x1311) || | ||
4393 | (adev->pdev->device == 0x131C)) { | ||
4394 | adev->gfx.config.max_cu_per_sh = 8; | ||
4395 | adev->gfx.config.max_backends_per_se = 2; | ||
4396 | } else if ((adev->pdev->device == 0x1309) || | ||
4397 | (adev->pdev->device == 0x130A) || | ||
4398 | (adev->pdev->device == 0x130D) || | ||
4399 | (adev->pdev->device == 0x1313) || | ||
4400 | (adev->pdev->device == 0x131D)) { | ||
4401 | adev->gfx.config.max_cu_per_sh = 6; | ||
4402 | adev->gfx.config.max_backends_per_se = 2; | ||
4403 | } else if ((adev->pdev->device == 0x1306) || | ||
4404 | (adev->pdev->device == 0x1307) || | ||
4405 | (adev->pdev->device == 0x130B) || | ||
4406 | (adev->pdev->device == 0x130E) || | ||
4407 | (adev->pdev->device == 0x1315) || | ||
4408 | (adev->pdev->device == 0x131B)) { | ||
4409 | adev->gfx.config.max_cu_per_sh = 4; | ||
4410 | adev->gfx.config.max_backends_per_se = 1; | ||
4411 | } else { | ||
4412 | adev->gfx.config.max_cu_per_sh = 3; | ||
4413 | adev->gfx.config.max_backends_per_se = 1; | ||
4414 | } | ||
4415 | adev->gfx.config.max_sh_per_se = 1; | 4389 | adev->gfx.config.max_sh_per_se = 1; |
4416 | adev->gfx.config.max_texture_channel_caches = 4; | 4390 | adev->gfx.config.max_texture_channel_caches = 4; |
4417 | adev->gfx.config.max_gprs = 256; | 4391 | adev->gfx.config.max_gprs = 256; |
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 2719937e09d6..3b7e7af09ead 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | |||
@@ -634,7 +634,7 @@ static int gmc_v9_0_late_init(void *handle) | |||
634 | for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i) | 634 | for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i) |
635 | BUG_ON(vm_inv_eng[i] > 16); | 635 | BUG_ON(vm_inv_eng[i] > 16); |
636 | 636 | ||
637 | if (adev->asic_type == CHIP_VEGA10) { | 637 | if (adev->asic_type == CHIP_VEGA10 && !amdgpu_sriov_vf(adev)) { |
638 | r = gmc_v9_0_ecc_available(adev); | 638 | r = gmc_v9_0_ecc_available(adev); |
639 | if (r == 1) { | 639 | if (r == 1) { |
640 | DRM_INFO("ECC is active.\n"); | 640 | DRM_INFO("ECC is active.\n"); |
@@ -682,7 +682,10 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) | |||
682 | adev->mc.vram_width = amdgpu_atomfirmware_get_vram_width(adev); | 682 | adev->mc.vram_width = amdgpu_atomfirmware_get_vram_width(adev); |
683 | if (!adev->mc.vram_width) { | 683 | if (!adev->mc.vram_width) { |
684 | /* hbm memory channel size */ | 684 | /* hbm memory channel size */ |
685 | chansize = 128; | 685 | if (adev->flags & AMD_IS_APU) |
686 | chansize = 64; | ||
687 | else | ||
688 | chansize = 128; | ||
686 | 689 | ||
687 | tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0); | 690 | tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0); |
688 | tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK; | 691 | tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK; |
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index e92fb372bc99..91cf95a8c39c 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | |||
@@ -238,31 +238,27 @@ static uint64_t sdma_v4_0_ring_get_rptr(struct amdgpu_ring *ring) | |||
238 | static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring) | 238 | static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring) |
239 | { | 239 | { |
240 | struct amdgpu_device *adev = ring->adev; | 240 | struct amdgpu_device *adev = ring->adev; |
241 | u64 *wptr = NULL; | 241 | u64 wptr; |
242 | uint64_t local_wptr = 0; | ||
243 | 242 | ||
244 | if (ring->use_doorbell) { | 243 | if (ring->use_doorbell) { |
245 | /* XXX check if swapping is necessary on BE */ | 244 | /* XXX check if swapping is necessary on BE */ |
246 | wptr = ((u64 *)&adev->wb.wb[ring->wptr_offs]); | 245 | wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs])); |
247 | DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", *wptr); | 246 | DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr); |
248 | *wptr = (*wptr) >> 2; | ||
249 | DRM_DEBUG("wptr/doorbell after shift == 0x%016llx\n", *wptr); | ||
250 | } else { | 247 | } else { |
251 | u32 lowbit, highbit; | 248 | u32 lowbit, highbit; |
252 | int me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1; | 249 | int me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1; |
253 | 250 | ||
254 | wptr = &local_wptr; | ||
255 | lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR)) >> 2; | 251 | lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR)) >> 2; |
256 | highbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2; | 252 | highbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2; |
257 | 253 | ||
258 | DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n", | 254 | DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n", |
259 | me, highbit, lowbit); | 255 | me, highbit, lowbit); |
260 | *wptr = highbit; | 256 | wptr = highbit; |
261 | *wptr = (*wptr) << 32; | 257 | wptr = wptr << 32; |
262 | *wptr |= lowbit; | 258 | wptr |= lowbit; |
263 | } | 259 | } |
264 | 260 | ||
265 | return *wptr; | 261 | return wptr >> 2; |
266 | } | 262 | } |
267 | 263 | ||
268 | /** | 264 | /** |
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index 543101d5a5ed..2095173aaabf 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include "amdgpu_uvd.h" | 31 | #include "amdgpu_uvd.h" |
32 | #include "amdgpu_vce.h" | 32 | #include "amdgpu_vce.h" |
33 | #include "atom.h" | 33 | #include "atom.h" |
34 | #include "amd_pcie.h" | ||
34 | #include "amdgpu_powerplay.h" | 35 | #include "amdgpu_powerplay.h" |
35 | #include "sid.h" | 36 | #include "sid.h" |
36 | #include "si_ih.h" | 37 | #include "si_ih.h" |
@@ -1461,8 +1462,8 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev) | |||
1461 | { | 1462 | { |
1462 | struct pci_dev *root = adev->pdev->bus->self; | 1463 | struct pci_dev *root = adev->pdev->bus->self; |
1463 | int bridge_pos, gpu_pos; | 1464 | int bridge_pos, gpu_pos; |
1464 | u32 speed_cntl, mask, current_data_rate; | 1465 | u32 speed_cntl, current_data_rate; |
1465 | int ret, i; | 1466 | int i; |
1466 | u16 tmp16; | 1467 | u16 tmp16; |
1467 | 1468 | ||
1468 | if (pci_is_root_bus(adev->pdev->bus)) | 1469 | if (pci_is_root_bus(adev->pdev->bus)) |
@@ -1474,23 +1475,20 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev) | |||
1474 | if (adev->flags & AMD_IS_APU) | 1475 | if (adev->flags & AMD_IS_APU) |
1475 | return; | 1476 | return; |
1476 | 1477 | ||
1477 | ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); | 1478 | if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 | |
1478 | if (ret != 0) | 1479 | CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3))) |
1479 | return; | ||
1480 | |||
1481 | if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80))) | ||
1482 | return; | 1480 | return; |
1483 | 1481 | ||
1484 | speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); | 1482 | speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); |
1485 | current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >> | 1483 | current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >> |
1486 | LC_CURRENT_DATA_RATE_SHIFT; | 1484 | LC_CURRENT_DATA_RATE_SHIFT; |
1487 | if (mask & DRM_PCIE_SPEED_80) { | 1485 | if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) { |
1488 | if (current_data_rate == 2) { | 1486 | if (current_data_rate == 2) { |
1489 | DRM_INFO("PCIE gen 3 link speeds already enabled\n"); | 1487 | DRM_INFO("PCIE gen 3 link speeds already enabled\n"); |
1490 | return; | 1488 | return; |
1491 | } | 1489 | } |
1492 | DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n"); | 1490 | DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n"); |
1493 | } else if (mask & DRM_PCIE_SPEED_50) { | 1491 | } else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) { |
1494 | if (current_data_rate == 1) { | 1492 | if (current_data_rate == 1) { |
1495 | DRM_INFO("PCIE gen 2 link speeds already enabled\n"); | 1493 | DRM_INFO("PCIE gen 2 link speeds already enabled\n"); |
1496 | return; | 1494 | return; |
@@ -1506,7 +1504,7 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev) | |||
1506 | if (!gpu_pos) | 1504 | if (!gpu_pos) |
1507 | return; | 1505 | return; |
1508 | 1506 | ||
1509 | if (mask & DRM_PCIE_SPEED_80) { | 1507 | if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) { |
1510 | if (current_data_rate != 2) { | 1508 | if (current_data_rate != 2) { |
1511 | u16 bridge_cfg, gpu_cfg; | 1509 | u16 bridge_cfg, gpu_cfg; |
1512 | u16 bridge_cfg2, gpu_cfg2; | 1510 | u16 bridge_cfg2, gpu_cfg2; |
@@ -1589,9 +1587,9 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev) | |||
1589 | 1587 | ||
1590 | pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16); | 1588 | pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16); |
1591 | tmp16 &= ~0xf; | 1589 | tmp16 &= ~0xf; |
1592 | if (mask & DRM_PCIE_SPEED_80) | 1590 | if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) |
1593 | tmp16 |= 3; | 1591 | tmp16 |= 3; |
1594 | else if (mask & DRM_PCIE_SPEED_50) | 1592 | else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) |
1595 | tmp16 |= 2; | 1593 | tmp16 |= 2; |
1596 | else | 1594 | else |
1597 | tmp16 |= 1; | 1595 | tmp16 |= 1; |
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index ce675a7f179a..22f0b7ff3ac9 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include "amdgpu_pm.h" | 26 | #include "amdgpu_pm.h" |
27 | #include "amdgpu_dpm.h" | 27 | #include "amdgpu_dpm.h" |
28 | #include "amdgpu_atombios.h" | 28 | #include "amdgpu_atombios.h" |
29 | #include "amd_pcie.h" | ||
29 | #include "sid.h" | 30 | #include "sid.h" |
30 | #include "r600_dpm.h" | 31 | #include "r600_dpm.h" |
31 | #include "si_dpm.h" | 32 | #include "si_dpm.h" |
@@ -3331,29 +3332,6 @@ static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev, | |||
3331 | } | 3332 | } |
3332 | } | 3333 | } |
3333 | 3334 | ||
3334 | static enum amdgpu_pcie_gen r600_get_pcie_gen_support(struct amdgpu_device *adev, | ||
3335 | u32 sys_mask, | ||
3336 | enum amdgpu_pcie_gen asic_gen, | ||
3337 | enum amdgpu_pcie_gen default_gen) | ||
3338 | { | ||
3339 | switch (asic_gen) { | ||
3340 | case AMDGPU_PCIE_GEN1: | ||
3341 | return AMDGPU_PCIE_GEN1; | ||
3342 | case AMDGPU_PCIE_GEN2: | ||
3343 | return AMDGPU_PCIE_GEN2; | ||
3344 | case AMDGPU_PCIE_GEN3: | ||
3345 | return AMDGPU_PCIE_GEN3; | ||
3346 | default: | ||
3347 | if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3)) | ||
3348 | return AMDGPU_PCIE_GEN3; | ||
3349 | else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2)) | ||
3350 | return AMDGPU_PCIE_GEN2; | ||
3351 | else | ||
3352 | return AMDGPU_PCIE_GEN1; | ||
3353 | } | ||
3354 | return AMDGPU_PCIE_GEN1; | ||
3355 | } | ||
3356 | |||
3357 | static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b, | 3335 | static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b, |
3358 | u32 *p, u32 *u) | 3336 | u32 *p, u32 *u) |
3359 | { | 3337 | { |
@@ -5028,10 +5006,11 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev, | |||
5028 | table->ACPIState.levels[0].vddc.index, | 5006 | table->ACPIState.levels[0].vddc.index, |
5029 | &table->ACPIState.levels[0].std_vddc); | 5007 | &table->ACPIState.levels[0].std_vddc); |
5030 | } | 5008 | } |
5031 | table->ACPIState.levels[0].gen2PCIE = (u8)r600_get_pcie_gen_support(adev, | 5009 | table->ACPIState.levels[0].gen2PCIE = |
5032 | si_pi->sys_pcie_mask, | 5010 | (u8)amdgpu_get_pcie_gen_support(adev, |
5033 | si_pi->boot_pcie_gen, | 5011 | si_pi->sys_pcie_mask, |
5034 | AMDGPU_PCIE_GEN1); | 5012 | si_pi->boot_pcie_gen, |
5013 | AMDGPU_PCIE_GEN1); | ||
5035 | 5014 | ||
5036 | if (si_pi->vddc_phase_shed_control) | 5015 | if (si_pi->vddc_phase_shed_control) |
5037 | si_populate_phase_shedding_value(adev, | 5016 | si_populate_phase_shedding_value(adev, |
@@ -7168,10 +7147,10 @@ static void si_parse_pplib_clock_info(struct amdgpu_device *adev, | |||
7168 | pl->vddc = le16_to_cpu(clock_info->si.usVDDC); | 7147 | pl->vddc = le16_to_cpu(clock_info->si.usVDDC); |
7169 | pl->vddci = le16_to_cpu(clock_info->si.usVDDCI); | 7148 | pl->vddci = le16_to_cpu(clock_info->si.usVDDCI); |
7170 | pl->flags = le32_to_cpu(clock_info->si.ulFlags); | 7149 | pl->flags = le32_to_cpu(clock_info->si.ulFlags); |
7171 | pl->pcie_gen = r600_get_pcie_gen_support(adev, | 7150 | pl->pcie_gen = amdgpu_get_pcie_gen_support(adev, |
7172 | si_pi->sys_pcie_mask, | 7151 | si_pi->sys_pcie_mask, |
7173 | si_pi->boot_pcie_gen, | 7152 | si_pi->boot_pcie_gen, |
7174 | clock_info->si.ucPCIEGen); | 7153 | clock_info->si.ucPCIEGen); |
7175 | 7154 | ||
7176 | /* patch up vddc if necessary */ | 7155 | /* patch up vddc if necessary */ |
7177 | ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc, | 7156 | ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc, |
@@ -7326,7 +7305,6 @@ static int si_dpm_init(struct amdgpu_device *adev) | |||
7326 | struct si_power_info *si_pi; | 7305 | struct si_power_info *si_pi; |
7327 | struct atom_clock_dividers dividers; | 7306 | struct atom_clock_dividers dividers; |
7328 | int ret; | 7307 | int ret; |
7329 | u32 mask; | ||
7330 | 7308 | ||
7331 | si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL); | 7309 | si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL); |
7332 | if (si_pi == NULL) | 7310 | if (si_pi == NULL) |
@@ -7336,11 +7314,9 @@ static int si_dpm_init(struct amdgpu_device *adev) | |||
7336 | eg_pi = &ni_pi->eg; | 7314 | eg_pi = &ni_pi->eg; |
7337 | pi = &eg_pi->rv7xx; | 7315 | pi = &eg_pi->rv7xx; |
7338 | 7316 | ||
7339 | ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); | 7317 | si_pi->sys_pcie_mask = |
7340 | if (ret) | 7318 | (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >> |
7341 | si_pi->sys_pcie_mask = 0; | 7319 | CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT; |
7342 | else | ||
7343 | si_pi->sys_pcie_mask = mask; | ||
7344 | si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID; | 7320 | si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID; |
7345 | si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev); | 7321 | si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev); |
7346 | 7322 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index b2bfedaf57f1..9bab4842cd44 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | |||
@@ -1618,7 +1618,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = { | |||
1618 | .set_wptr = uvd_v6_0_enc_ring_set_wptr, | 1618 | .set_wptr = uvd_v6_0_enc_ring_set_wptr, |
1619 | .emit_frame_size = | 1619 | .emit_frame_size = |
1620 | 4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */ | 1620 | 4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */ |
1621 | 6 + /* uvd_v6_0_enc_ring_emit_vm_flush */ | 1621 | 5 + /* uvd_v6_0_enc_ring_emit_vm_flush */ |
1622 | 5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */ | 1622 | 5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */ |
1623 | 1, /* uvd_v6_0_enc_ring_insert_end */ | 1623 | 1, /* uvd_v6_0_enc_ring_insert_end */ |
1624 | .emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */ | 1624 | .emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */ |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 1ce4c98385e3..c345e645f1d7 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | |||
@@ -629,11 +629,13 @@ static int dm_resume(void *handle) | |||
629 | { | 629 | { |
630 | struct amdgpu_device *adev = handle; | 630 | struct amdgpu_device *adev = handle; |
631 | struct amdgpu_display_manager *dm = &adev->dm; | 631 | struct amdgpu_display_manager *dm = &adev->dm; |
632 | int ret = 0; | ||
632 | 633 | ||
633 | /* power on hardware */ | 634 | /* power on hardware */ |
634 | dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); | 635 | dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); |
635 | 636 | ||
636 | return 0; | 637 | ret = amdgpu_dm_display_resume(adev); |
638 | return ret; | ||
637 | } | 639 | } |
638 | 640 | ||
639 | int amdgpu_dm_display_resume(struct amdgpu_device *adev) | 641 | int amdgpu_dm_display_resume(struct amdgpu_device *adev) |
@@ -1035,6 +1037,10 @@ static void handle_hpd_rx_irq(void *param) | |||
1035 | !is_mst_root_connector) { | 1037 | !is_mst_root_connector) { |
1036 | /* Downstream Port status changed. */ | 1038 | /* Downstream Port status changed. */ |
1037 | if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) { | 1039 | if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) { |
1040 | |||
1041 | if (aconnector->fake_enable) | ||
1042 | aconnector->fake_enable = false; | ||
1043 | |||
1038 | amdgpu_dm_update_connector_after_detect(aconnector); | 1044 | amdgpu_dm_update_connector_after_detect(aconnector); |
1039 | 1045 | ||
1040 | 1046 | ||
@@ -2010,30 +2016,32 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode, | |||
2010 | dst.width = stream->timing.h_addressable; | 2016 | dst.width = stream->timing.h_addressable; |
2011 | dst.height = stream->timing.v_addressable; | 2017 | dst.height = stream->timing.v_addressable; |
2012 | 2018 | ||
2013 | rmx_type = dm_state->scaling; | 2019 | if (dm_state) { |
2014 | if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) { | 2020 | rmx_type = dm_state->scaling; |
2015 | if (src.width * dst.height < | 2021 | if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) { |
2016 | src.height * dst.width) { | 2022 | if (src.width * dst.height < |
2017 | /* height needs less upscaling/more downscaling */ | 2023 | src.height * dst.width) { |
2018 | dst.width = src.width * | 2024 | /* height needs less upscaling/more downscaling */ |
2019 | dst.height / src.height; | 2025 | dst.width = src.width * |
2020 | } else { | 2026 | dst.height / src.height; |
2021 | /* width needs less upscaling/more downscaling */ | 2027 | } else { |
2022 | dst.height = src.height * | 2028 | /* width needs less upscaling/more downscaling */ |
2023 | dst.width / src.width; | 2029 | dst.height = src.height * |
2030 | dst.width / src.width; | ||
2031 | } | ||
2032 | } else if (rmx_type == RMX_CENTER) { | ||
2033 | dst = src; | ||
2024 | } | 2034 | } |
2025 | } else if (rmx_type == RMX_CENTER) { | ||
2026 | dst = src; | ||
2027 | } | ||
2028 | 2035 | ||
2029 | dst.x = (stream->timing.h_addressable - dst.width) / 2; | 2036 | dst.x = (stream->timing.h_addressable - dst.width) / 2; |
2030 | dst.y = (stream->timing.v_addressable - dst.height) / 2; | 2037 | dst.y = (stream->timing.v_addressable - dst.height) / 2; |
2031 | 2038 | ||
2032 | if (dm_state->underscan_enable) { | 2039 | if (dm_state->underscan_enable) { |
2033 | dst.x += dm_state->underscan_hborder / 2; | 2040 | dst.x += dm_state->underscan_hborder / 2; |
2034 | dst.y += dm_state->underscan_vborder / 2; | 2041 | dst.y += dm_state->underscan_vborder / 2; |
2035 | dst.width -= dm_state->underscan_hborder; | 2042 | dst.width -= dm_state->underscan_hborder; |
2036 | dst.height -= dm_state->underscan_vborder; | 2043 | dst.height -= dm_state->underscan_vborder; |
2044 | } | ||
2037 | } | 2045 | } |
2038 | 2046 | ||
2039 | stream->src = src; | 2047 | stream->src = src; |
@@ -2358,12 +2366,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, | |||
2358 | 2366 | ||
2359 | if (aconnector == NULL) { | 2367 | if (aconnector == NULL) { |
2360 | DRM_ERROR("aconnector is NULL!\n"); | 2368 | DRM_ERROR("aconnector is NULL!\n"); |
2361 | goto drm_connector_null; | 2369 | return stream; |
2362 | } | ||
2363 | |||
2364 | if (dm_state == NULL) { | ||
2365 | DRM_ERROR("dm_state is NULL!\n"); | ||
2366 | goto dm_state_null; | ||
2367 | } | 2370 | } |
2368 | 2371 | ||
2369 | drm_connector = &aconnector->base; | 2372 | drm_connector = &aconnector->base; |
@@ -2375,18 +2378,18 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, | |||
2375 | */ | 2378 | */ |
2376 | if (aconnector->mst_port) { | 2379 | if (aconnector->mst_port) { |
2377 | dm_dp_mst_dc_sink_create(drm_connector); | 2380 | dm_dp_mst_dc_sink_create(drm_connector); |
2378 | goto mst_dc_sink_create_done; | 2381 | return stream; |
2379 | } | 2382 | } |
2380 | 2383 | ||
2381 | if (create_fake_sink(aconnector)) | 2384 | if (create_fake_sink(aconnector)) |
2382 | goto stream_create_fail; | 2385 | return stream; |
2383 | } | 2386 | } |
2384 | 2387 | ||
2385 | stream = dc_create_stream_for_sink(aconnector->dc_sink); | 2388 | stream = dc_create_stream_for_sink(aconnector->dc_sink); |
2386 | 2389 | ||
2387 | if (stream == NULL) { | 2390 | if (stream == NULL) { |
2388 | DRM_ERROR("Failed to create stream for sink!\n"); | 2391 | DRM_ERROR("Failed to create stream for sink!\n"); |
2389 | goto stream_create_fail; | 2392 | return stream; |
2390 | } | 2393 | } |
2391 | 2394 | ||
2392 | list_for_each_entry(preferred_mode, &aconnector->base.modes, head) { | 2395 | list_for_each_entry(preferred_mode, &aconnector->base.modes, head) { |
@@ -2412,9 +2415,12 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, | |||
2412 | } else { | 2415 | } else { |
2413 | decide_crtc_timing_for_drm_display_mode( | 2416 | decide_crtc_timing_for_drm_display_mode( |
2414 | &mode, preferred_mode, | 2417 | &mode, preferred_mode, |
2415 | dm_state->scaling != RMX_OFF); | 2418 | dm_state ? (dm_state->scaling != RMX_OFF) : false); |
2416 | } | 2419 | } |
2417 | 2420 | ||
2421 | if (!dm_state) | ||
2422 | drm_mode_set_crtcinfo(&mode, 0); | ||
2423 | |||
2418 | fill_stream_properties_from_drm_display_mode(stream, | 2424 | fill_stream_properties_from_drm_display_mode(stream, |
2419 | &mode, &aconnector->base); | 2425 | &mode, &aconnector->base); |
2420 | update_stream_scaling_settings(&mode, dm_state, stream); | 2426 | update_stream_scaling_settings(&mode, dm_state, stream); |
@@ -2424,10 +2430,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, | |||
2424 | drm_connector, | 2430 | drm_connector, |
2425 | aconnector->dc_sink); | 2431 | aconnector->dc_sink); |
2426 | 2432 | ||
2427 | stream_create_fail: | 2433 | update_stream_signal(stream); |
2428 | dm_state_null: | 2434 | |
2429 | drm_connector_null: | ||
2430 | mst_dc_sink_create_done: | ||
2431 | return stream; | 2435 | return stream; |
2432 | } | 2436 | } |
2433 | 2437 | ||
@@ -2495,6 +2499,27 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc) | |||
2495 | return &state->base; | 2499 | return &state->base; |
2496 | } | 2500 | } |
2497 | 2501 | ||
2502 | |||
2503 | static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable) | ||
2504 | { | ||
2505 | enum dc_irq_source irq_source; | ||
2506 | struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); | ||
2507 | struct amdgpu_device *adev = crtc->dev->dev_private; | ||
2508 | |||
2509 | irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst; | ||
2510 | return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY; | ||
2511 | } | ||
2512 | |||
2513 | static int dm_enable_vblank(struct drm_crtc *crtc) | ||
2514 | { | ||
2515 | return dm_set_vblank(crtc, true); | ||
2516 | } | ||
2517 | |||
2518 | static void dm_disable_vblank(struct drm_crtc *crtc) | ||
2519 | { | ||
2520 | dm_set_vblank(crtc, false); | ||
2521 | } | ||
2522 | |||
2498 | /* Implemented only the options currently availible for the driver */ | 2523 | /* Implemented only the options currently availible for the driver */ |
2499 | static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = { | 2524 | static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = { |
2500 | .reset = dm_crtc_reset_state, | 2525 | .reset = dm_crtc_reset_state, |
@@ -2504,6 +2529,8 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = { | |||
2504 | .page_flip = drm_atomic_helper_page_flip, | 2529 | .page_flip = drm_atomic_helper_page_flip, |
2505 | .atomic_duplicate_state = dm_crtc_duplicate_state, | 2530 | .atomic_duplicate_state = dm_crtc_duplicate_state, |
2506 | .atomic_destroy_state = dm_crtc_destroy_state, | 2531 | .atomic_destroy_state = dm_crtc_destroy_state, |
2532 | .enable_vblank = dm_enable_vblank, | ||
2533 | .disable_vblank = dm_disable_vblank, | ||
2507 | }; | 2534 | }; |
2508 | 2535 | ||
2509 | static enum drm_connector_status | 2536 | static enum drm_connector_status |
@@ -2798,7 +2825,7 @@ int amdgpu_dm_connector_mode_valid(struct drm_connector *connector, | |||
2798 | goto fail; | 2825 | goto fail; |
2799 | } | 2826 | } |
2800 | 2827 | ||
2801 | stream = dc_create_stream_for_sink(dc_sink); | 2828 | stream = create_stream_for_sink(aconnector, mode, NULL); |
2802 | if (stream == NULL) { | 2829 | if (stream == NULL) { |
2803 | DRM_ERROR("Failed to create stream for sink!\n"); | 2830 | DRM_ERROR("Failed to create stream for sink!\n"); |
2804 | goto fail; | 2831 | goto fail; |
@@ -3058,6 +3085,9 @@ static int dm_plane_atomic_check(struct drm_plane *plane, | |||
3058 | if (!dm_plane_state->dc_state) | 3085 | if (!dm_plane_state->dc_state) |
3059 | return 0; | 3086 | return 0; |
3060 | 3087 | ||
3088 | if (!fill_rects_from_plane_state(state, dm_plane_state->dc_state)) | ||
3089 | return -EINVAL; | ||
3090 | |||
3061 | if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK) | 3091 | if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK) |
3062 | return 0; | 3092 | return 0; |
3063 | 3093 | ||
@@ -4630,8 +4660,6 @@ static int dm_update_planes_state(struct dc *dc, | |||
4630 | bool pflip_needed = !state->allow_modeset; | 4660 | bool pflip_needed = !state->allow_modeset; |
4631 | int ret = 0; | 4661 | int ret = 0; |
4632 | 4662 | ||
4633 | if (pflip_needed) | ||
4634 | return ret; | ||
4635 | 4663 | ||
4636 | /* Add new planes */ | 4664 | /* Add new planes */ |
4637 | for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { | 4665 | for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { |
@@ -4646,6 +4674,8 @@ static int dm_update_planes_state(struct dc *dc, | |||
4646 | 4674 | ||
4647 | /* Remove any changed/removed planes */ | 4675 | /* Remove any changed/removed planes */ |
4648 | if (!enable) { | 4676 | if (!enable) { |
4677 | if (pflip_needed) | ||
4678 | continue; | ||
4649 | 4679 | ||
4650 | if (!old_plane_crtc) | 4680 | if (!old_plane_crtc) |
4651 | continue; | 4681 | continue; |
@@ -4677,6 +4707,7 @@ static int dm_update_planes_state(struct dc *dc, | |||
4677 | *lock_and_validation_needed = true; | 4707 | *lock_and_validation_needed = true; |
4678 | 4708 | ||
4679 | } else { /* Add new planes */ | 4709 | } else { /* Add new planes */ |
4710 | struct dc_plane_state *dc_new_plane_state; | ||
4680 | 4711 | ||
4681 | if (drm_atomic_plane_disabling(plane->state, new_plane_state)) | 4712 | if (drm_atomic_plane_disabling(plane->state, new_plane_state)) |
4682 | continue; | 4713 | continue; |
@@ -4690,38 +4721,50 @@ static int dm_update_planes_state(struct dc *dc, | |||
4690 | if (!dm_new_crtc_state->stream) | 4721 | if (!dm_new_crtc_state->stream) |
4691 | continue; | 4722 | continue; |
4692 | 4723 | ||
4724 | if (pflip_needed) | ||
4725 | continue; | ||
4693 | 4726 | ||
4694 | WARN_ON(dm_new_plane_state->dc_state); | 4727 | WARN_ON(dm_new_plane_state->dc_state); |
4695 | 4728 | ||
4696 | dm_new_plane_state->dc_state = dc_create_plane_state(dc); | 4729 | dc_new_plane_state = dc_create_plane_state(dc); |
4697 | 4730 | if (!dc_new_plane_state) { | |
4698 | DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n", | ||
4699 | plane->base.id, new_plane_crtc->base.id); | ||
4700 | |||
4701 | if (!dm_new_plane_state->dc_state) { | ||
4702 | ret = -EINVAL; | 4731 | ret = -EINVAL; |
4703 | return ret; | 4732 | return ret; |
4704 | } | 4733 | } |
4705 | 4734 | ||
4735 | DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n", | ||
4736 | plane->base.id, new_plane_crtc->base.id); | ||
4737 | |||
4706 | ret = fill_plane_attributes( | 4738 | ret = fill_plane_attributes( |
4707 | new_plane_crtc->dev->dev_private, | 4739 | new_plane_crtc->dev->dev_private, |
4708 | dm_new_plane_state->dc_state, | 4740 | dc_new_plane_state, |
4709 | new_plane_state, | 4741 | new_plane_state, |
4710 | new_crtc_state); | 4742 | new_crtc_state); |
4711 | if (ret) | 4743 | if (ret) { |
4744 | dc_plane_state_release(dc_new_plane_state); | ||
4712 | return ret; | 4745 | return ret; |
4746 | } | ||
4713 | 4747 | ||
4714 | 4748 | /* | |
4749 | * Any atomic check errors that occur after this will | ||
4750 | * not need a release. The plane state will be attached | ||
4751 | * to the stream, and therefore part of the atomic | ||
4752 | * state. It'll be released when the atomic state is | ||
4753 | * cleaned. | ||
4754 | */ | ||
4715 | if (!dc_add_plane_to_context( | 4755 | if (!dc_add_plane_to_context( |
4716 | dc, | 4756 | dc, |
4717 | dm_new_crtc_state->stream, | 4757 | dm_new_crtc_state->stream, |
4718 | dm_new_plane_state->dc_state, | 4758 | dc_new_plane_state, |
4719 | dm_state->context)) { | 4759 | dm_state->context)) { |
4720 | 4760 | ||
4761 | dc_plane_state_release(dc_new_plane_state); | ||
4721 | ret = -EINVAL; | 4762 | ret = -EINVAL; |
4722 | return ret; | 4763 | return ret; |
4723 | } | 4764 | } |
4724 | 4765 | ||
4766 | dm_new_plane_state->dc_state = dc_new_plane_state; | ||
4767 | |||
4725 | /* Tell DC to do a full surface update every time there | 4768 | /* Tell DC to do a full surface update every time there |
4726 | * is a plane change. Inefficient, but works for now. | 4769 | * is a plane change. Inefficient, but works for now. |
4727 | */ | 4770 | */ |
@@ -4735,6 +4778,30 @@ static int dm_update_planes_state(struct dc *dc, | |||
4735 | return ret; | 4778 | return ret; |
4736 | } | 4779 | } |
4737 | 4780 | ||
4781 | static int dm_atomic_check_plane_state_fb(struct drm_atomic_state *state, | ||
4782 | struct drm_crtc *crtc) | ||
4783 | { | ||
4784 | struct drm_plane *plane; | ||
4785 | struct drm_crtc_state *crtc_state; | ||
4786 | |||
4787 | WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc)); | ||
4788 | |||
4789 | drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) { | ||
4790 | struct drm_plane_state *plane_state = | ||
4791 | drm_atomic_get_plane_state(state, plane); | ||
4792 | |||
4793 | if (IS_ERR(plane_state)) | ||
4794 | return -EDEADLK; | ||
4795 | |||
4796 | crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc); | ||
4797 | if (crtc->primary == plane && crtc_state->active) { | ||
4798 | if (!plane_state->fb) | ||
4799 | return -EINVAL; | ||
4800 | } | ||
4801 | } | ||
4802 | return 0; | ||
4803 | } | ||
4804 | |||
4738 | static int amdgpu_dm_atomic_check(struct drm_device *dev, | 4805 | static int amdgpu_dm_atomic_check(struct drm_device *dev, |
4739 | struct drm_atomic_state *state) | 4806 | struct drm_atomic_state *state) |
4740 | { | 4807 | { |
@@ -4758,6 +4825,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, | |||
4758 | goto fail; | 4825 | goto fail; |
4759 | 4826 | ||
4760 | for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { | 4827 | for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { |
4828 | ret = dm_atomic_check_plane_state_fb(state, crtc); | ||
4829 | if (ret) | ||
4830 | goto fail; | ||
4831 | |||
4761 | if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && | 4832 | if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && |
4762 | !new_crtc_state->color_mgmt_changed) | 4833 | !new_crtc_state->color_mgmt_changed) |
4763 | continue; | 4834 | continue; |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index 1874b6cee6af..422055080df4 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | |||
@@ -683,10 +683,8 @@ static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = { | |||
683 | 683 | ||
684 | void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev) | 684 | void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev) |
685 | { | 685 | { |
686 | if (adev->mode_info.num_crtc > 0) | 686 | |
687 | adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc; | 687 | adev->crtc_irq.num_types = adev->mode_info.num_crtc; |
688 | else | ||
689 | adev->crtc_irq.num_types = 0; | ||
690 | adev->crtc_irq.funcs = &dm_crtc_irq_funcs; | 688 | adev->crtc_irq.funcs = &dm_crtc_irq_funcs; |
691 | 689 | ||
692 | adev->pageflip_irq.num_types = adev->mode_info.num_crtc; | 690 | adev->pageflip_irq.num_types = adev->mode_info.num_crtc; |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index f3d87f418d2e..93421dad21bd 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | |||
@@ -189,6 +189,12 @@ void dm_dp_mst_dc_sink_create(struct drm_connector *connector) | |||
189 | .link = aconnector->dc_link, | 189 | .link = aconnector->dc_link, |
190 | .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST }; | 190 | .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST }; |
191 | 191 | ||
192 | /* | ||
193 | * TODO: Need to further figure out why ddc.algo is NULL while MST port exists | ||
194 | */ | ||
195 | if (!aconnector->port || !aconnector->port->aux.ddc.algo) | ||
196 | return; | ||
197 | |||
192 | edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port); | 198 | edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port); |
193 | 199 | ||
194 | if (!edid) { | 200 | if (!edid) { |
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 35e84ed031de..12868c769606 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c | |||
@@ -1358,13 +1358,13 @@ enum dc_irq_source dc_interrupt_to_irq_source( | |||
1358 | return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id); | 1358 | return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id); |
1359 | } | 1359 | } |
1360 | 1360 | ||
1361 | void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable) | 1361 | bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable) |
1362 | { | 1362 | { |
1363 | 1363 | ||
1364 | if (dc == NULL) | 1364 | if (dc == NULL) |
1365 | return; | 1365 | return false; |
1366 | 1366 | ||
1367 | dal_irq_service_set(dc->res_pool->irqs, src, enable); | 1367 | return dal_irq_service_set(dc->res_pool->irqs, src, enable); |
1368 | } | 1368 | } |
1369 | 1369 | ||
1370 | void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src) | 1370 | void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src) |
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index a37428271573..be5546181fa8 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c | |||
@@ -1749,8 +1749,7 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx) | |||
1749 | link->link_enc, | 1749 | link->link_enc, |
1750 | pipe_ctx->clock_source->id, | 1750 | pipe_ctx->clock_source->id, |
1751 | display_color_depth, | 1751 | display_color_depth, |
1752 | pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A, | 1752 | pipe_ctx->stream->signal, |
1753 | pipe_ctx->stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK, | ||
1754 | stream->phy_pix_clk); | 1753 | stream->phy_pix_clk); |
1755 | 1754 | ||
1756 | if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) | 1755 | if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) |
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 33d91e4474ea..639421a00ab6 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | |||
@@ -1465,7 +1465,7 @@ void decide_link_settings(struct dc_stream_state *stream, | |||
1465 | /* MST doesn't perform link training for now | 1465 | /* MST doesn't perform link training for now |
1466 | * TODO: add MST specific link training routine | 1466 | * TODO: add MST specific link training routine |
1467 | */ | 1467 | */ |
1468 | if (is_mst_supported(link)) { | 1468 | if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { |
1469 | *link_setting = link->verified_link_cap; | 1469 | *link_setting = link->verified_link_cap; |
1470 | return; | 1470 | return; |
1471 | } | 1471 | } |
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 95b8dd0e53c6..4d07ffebfd31 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c | |||
@@ -1360,9 +1360,6 @@ bool dc_is_stream_scaling_unchanged( | |||
1360 | return true; | 1360 | return true; |
1361 | } | 1361 | } |
1362 | 1362 | ||
1363 | /* Maximum TMDS single link pixel clock 165MHz */ | ||
1364 | #define TMDS_MAX_PIXEL_CLOCK_IN_KHZ 165000 | ||
1365 | |||
1366 | static void update_stream_engine_usage( | 1363 | static void update_stream_engine_usage( |
1367 | struct resource_context *res_ctx, | 1364 | struct resource_context *res_ctx, |
1368 | const struct resource_pool *pool, | 1365 | const struct resource_pool *pool, |
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 261811e0c094..cd5819789d76 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c | |||
@@ -33,8 +33,7 @@ | |||
33 | /******************************************************************************* | 33 | /******************************************************************************* |
34 | * Private functions | 34 | * Private functions |
35 | ******************************************************************************/ | 35 | ******************************************************************************/ |
36 | #define TMDS_MAX_PIXEL_CLOCK_IN_KHZ_UPMOST 297000 | 36 | void update_stream_signal(struct dc_stream_state *stream) |
37 | static void update_stream_signal(struct dc_stream_state *stream) | ||
38 | { | 37 | { |
39 | 38 | ||
40 | struct dc_sink *dc_sink = stream->sink; | 39 | struct dc_sink *dc_sink = stream->sink; |
@@ -45,8 +44,9 @@ static void update_stream_signal(struct dc_stream_state *stream) | |||
45 | stream->signal = dc_sink->sink_signal; | 44 | stream->signal = dc_sink->sink_signal; |
46 | 45 | ||
47 | if (dc_is_dvi_signal(stream->signal)) { | 46 | if (dc_is_dvi_signal(stream->signal)) { |
48 | if (stream->timing.pix_clk_khz > TMDS_MAX_PIXEL_CLOCK_IN_KHZ_UPMOST && | 47 | if (stream->ctx->dc->caps.dual_link_dvi && |
49 | stream->sink->sink_signal != SIGNAL_TYPE_DVI_SINGLE_LINK) | 48 | stream->timing.pix_clk_khz > TMDS_MAX_PIXEL_CLOCK && |
49 | stream->sink->sink_signal != SIGNAL_TYPE_DVI_SINGLE_LINK) | ||
50 | stream->signal = SIGNAL_TYPE_DVI_DUAL_LINK; | 50 | stream->signal = SIGNAL_TYPE_DVI_DUAL_LINK; |
51 | else | 51 | else |
52 | stream->signal = SIGNAL_TYPE_DVI_SINGLE_LINK; | 52 | stream->signal = SIGNAL_TYPE_DVI_SINGLE_LINK; |
@@ -193,44 +193,20 @@ bool dc_stream_set_cursor_attributes( | |||
193 | 193 | ||
194 | core_dc = stream->ctx->dc; | 194 | core_dc = stream->ctx->dc; |
195 | res_ctx = &core_dc->current_state->res_ctx; | 195 | res_ctx = &core_dc->current_state->res_ctx; |
196 | stream->cursor_attributes = *attributes; | ||
196 | 197 | ||
197 | for (i = 0; i < MAX_PIPES; i++) { | 198 | for (i = 0; i < MAX_PIPES; i++) { |
198 | struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; | 199 | struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; |
199 | 200 | ||
200 | if (pipe_ctx->stream != stream || (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp)) | 201 | if (pipe_ctx->stream != stream || (!pipe_ctx->plane_res.xfm && |
202 | !pipe_ctx->plane_res.dpp) || !pipe_ctx->plane_res.ipp) | ||
201 | continue; | 203 | continue; |
202 | if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state) | 204 | if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state) |
203 | continue; | 205 | continue; |
204 | 206 | ||
205 | 207 | ||
206 | if (pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes != NULL) | 208 | core_dc->hwss.set_cursor_attribute(pipe_ctx); |
207 | pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes( | ||
208 | pipe_ctx->plane_res.ipp, attributes); | ||
209 | |||
210 | if (pipe_ctx->plane_res.hubp != NULL && | ||
211 | pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes != NULL) | ||
212 | pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes( | ||
213 | pipe_ctx->plane_res.hubp, attributes); | ||
214 | |||
215 | if (pipe_ctx->plane_res.mi != NULL && | ||
216 | pipe_ctx->plane_res.mi->funcs->set_cursor_attributes != NULL) | ||
217 | pipe_ctx->plane_res.mi->funcs->set_cursor_attributes( | ||
218 | pipe_ctx->plane_res.mi, attributes); | ||
219 | |||
220 | |||
221 | if (pipe_ctx->plane_res.xfm != NULL && | ||
222 | pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes != NULL) | ||
223 | pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes( | ||
224 | pipe_ctx->plane_res.xfm, attributes); | ||
225 | |||
226 | if (pipe_ctx->plane_res.dpp != NULL && | ||
227 | pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes != NULL) | ||
228 | pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes( | ||
229 | pipe_ctx->plane_res.dpp, attributes->color_format); | ||
230 | } | 209 | } |
231 | |||
232 | stream->cursor_attributes = *attributes; | ||
233 | |||
234 | return true; | 210 | return true; |
235 | } | 211 | } |
236 | 212 | ||
@@ -254,55 +230,21 @@ bool dc_stream_set_cursor_position( | |||
254 | 230 | ||
255 | core_dc = stream->ctx->dc; | 231 | core_dc = stream->ctx->dc; |
256 | res_ctx = &core_dc->current_state->res_ctx; | 232 | res_ctx = &core_dc->current_state->res_ctx; |
233 | stream->cursor_position = *position; | ||
257 | 234 | ||
258 | for (i = 0; i < MAX_PIPES; i++) { | 235 | for (i = 0; i < MAX_PIPES; i++) { |
259 | struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; | 236 | struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; |
260 | struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp; | ||
261 | struct mem_input *mi = pipe_ctx->plane_res.mi; | ||
262 | struct hubp *hubp = pipe_ctx->plane_res.hubp; | ||
263 | struct dpp *dpp = pipe_ctx->plane_res.dpp; | ||
264 | struct dc_cursor_position pos_cpy = *position; | ||
265 | struct dc_cursor_mi_param param = { | ||
266 | .pixel_clk_khz = stream->timing.pix_clk_khz, | ||
267 | .ref_clk_khz = core_dc->res_pool->ref_clock_inKhz, | ||
268 | .viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x, | ||
269 | .viewport_width = pipe_ctx->plane_res.scl_data.viewport.width, | ||
270 | .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz | ||
271 | }; | ||
272 | 237 | ||
273 | if (pipe_ctx->stream != stream || | 238 | if (pipe_ctx->stream != stream || |
274 | (!pipe_ctx->plane_res.mi && !pipe_ctx->plane_res.hubp) || | 239 | (!pipe_ctx->plane_res.mi && !pipe_ctx->plane_res.hubp) || |
275 | !pipe_ctx->plane_state || | 240 | !pipe_ctx->plane_state || |
276 | (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp)) | 241 | (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp) || |
277 | continue; | 242 | !pipe_ctx->plane_res.ipp) |
278 | |||
279 | if (pipe_ctx->plane_state->address.type | ||
280 | == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE) | ||
281 | pos_cpy.enable = false; | ||
282 | |||
283 | if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state) | ||
284 | pos_cpy.enable = false; | ||
285 | |||
286 | |||
287 | if (ipp != NULL && ipp->funcs->ipp_cursor_set_position != NULL) | ||
288 | ipp->funcs->ipp_cursor_set_position(ipp, &pos_cpy, ¶m); | ||
289 | |||
290 | if (mi != NULL && mi->funcs->set_cursor_position != NULL) | ||
291 | mi->funcs->set_cursor_position(mi, &pos_cpy, ¶m); | ||
292 | |||
293 | if (!hubp) | ||
294 | continue; | 243 | continue; |
295 | 244 | ||
296 | if (hubp->funcs->set_cursor_position != NULL) | 245 | core_dc->hwss.set_cursor_position(pipe_ctx); |
297 | hubp->funcs->set_cursor_position(hubp, &pos_cpy, ¶m); | ||
298 | |||
299 | if (dpp != NULL && dpp->funcs->set_cursor_position != NULL) | ||
300 | dpp->funcs->set_cursor_position(dpp, &pos_cpy, ¶m, hubp->curs_attr.width); | ||
301 | |||
302 | } | 246 | } |
303 | 247 | ||
304 | stream->cursor_position = *position; | ||
305 | |||
306 | return true; | 248 | return true; |
307 | } | 249 | } |
308 | 250 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index e2e3c9df79ea..d6d56611604e 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h | |||
@@ -62,6 +62,7 @@ struct dc_caps { | |||
62 | bool dcc_const_color; | 62 | bool dcc_const_color; |
63 | bool dynamic_audio; | 63 | bool dynamic_audio; |
64 | bool is_apu; | 64 | bool is_apu; |
65 | bool dual_link_dvi; | ||
65 | }; | 66 | }; |
66 | 67 | ||
67 | struct dc_dcc_surface_param { | 68 | struct dc_dcc_surface_param { |
@@ -672,7 +673,7 @@ enum dc_irq_source dc_interrupt_to_irq_source( | |||
672 | struct dc *dc, | 673 | struct dc *dc, |
673 | uint32_t src_id, | 674 | uint32_t src_id, |
674 | uint32_t ext_id); | 675 | uint32_t ext_id); |
675 | void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable); | 676 | bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable); |
676 | void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src); | 677 | void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src); |
677 | enum dc_irq_source dc_get_hpd_irq_source_at_index( | 678 | enum dc_irq_source dc_get_hpd_irq_source_at_index( |
678 | struct dc *dc, uint32_t link_index); | 679 | struct dc *dc, uint32_t link_index); |
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 01c60f11b2bd..456e4d29eadd 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h | |||
@@ -237,6 +237,8 @@ enum surface_update_type dc_check_update_surfaces_for_stream( | |||
237 | */ | 237 | */ |
238 | struct dc_stream_state *dc_create_stream_for_sink(struct dc_sink *dc_sink); | 238 | struct dc_stream_state *dc_create_stream_for_sink(struct dc_sink *dc_sink); |
239 | 239 | ||
240 | void update_stream_signal(struct dc_stream_state *stream); | ||
241 | |||
240 | void dc_stream_retain(struct dc_stream_state *dc_stream); | 242 | void dc_stream_retain(struct dc_stream_state *dc_stream); |
241 | void dc_stream_release(struct dc_stream_state *dc_stream); | 243 | void dc_stream_release(struct dc_stream_state *dc_stream); |
242 | 244 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h index b73db9e78437..a993279a8f2d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h | |||
@@ -236,6 +236,7 @@ | |||
236 | SR(D2VGA_CONTROL), \ | 236 | SR(D2VGA_CONTROL), \ |
237 | SR(D3VGA_CONTROL), \ | 237 | SR(D3VGA_CONTROL), \ |
238 | SR(D4VGA_CONTROL), \ | 238 | SR(D4VGA_CONTROL), \ |
239 | SR(VGA_TEST_CONTROL), \ | ||
239 | SR(DC_IP_REQUEST_CNTL), \ | 240 | SR(DC_IP_REQUEST_CNTL), \ |
240 | BL_REG_LIST() | 241 | BL_REG_LIST() |
241 | 242 | ||
@@ -337,6 +338,7 @@ struct dce_hwseq_registers { | |||
337 | uint32_t D2VGA_CONTROL; | 338 | uint32_t D2VGA_CONTROL; |
338 | uint32_t D3VGA_CONTROL; | 339 | uint32_t D3VGA_CONTROL; |
339 | uint32_t D4VGA_CONTROL; | 340 | uint32_t D4VGA_CONTROL; |
341 | uint32_t VGA_TEST_CONTROL; | ||
340 | /* MMHUB registers. read only. temporary hack */ | 342 | /* MMHUB registers. read only. temporary hack */ |
341 | uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32; | 343 | uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32; |
342 | uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; | 344 | uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; |
@@ -493,6 +495,9 @@ struct dce_hwseq_registers { | |||
493 | HWS_SF(, DOMAIN6_PG_STATUS, DOMAIN6_PGFSM_PWR_STATUS, mask_sh), \ | 495 | HWS_SF(, DOMAIN6_PG_STATUS, DOMAIN6_PGFSM_PWR_STATUS, mask_sh), \ |
494 | HWS_SF(, DOMAIN7_PG_STATUS, DOMAIN7_PGFSM_PWR_STATUS, mask_sh), \ | 496 | HWS_SF(, DOMAIN7_PG_STATUS, DOMAIN7_PGFSM_PWR_STATUS, mask_sh), \ |
495 | HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ | 497 | HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ |
498 | HWS_SF(, D1VGA_CONTROL, D1VGA_MODE_ENABLE, mask_sh),\ | ||
499 | HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_ENABLE, mask_sh),\ | ||
500 | HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh),\ | ||
496 | HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \ | 501 | HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \ |
497 | HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh) | 502 | HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh) |
498 | 503 | ||
@@ -583,7 +588,10 @@ struct dce_hwseq_registers { | |||
583 | type DCFCLK_GATE_DIS; \ | 588 | type DCFCLK_GATE_DIS; \ |
584 | type DCHUBBUB_GLOBAL_TIMER_REFDIV; \ | 589 | type DCHUBBUB_GLOBAL_TIMER_REFDIV; \ |
585 | type DENTIST_DPPCLK_WDIVIDER; \ | 590 | type DENTIST_DPPCLK_WDIVIDER; \ |
586 | type DENTIST_DISPCLK_WDIVIDER; | 591 | type DENTIST_DISPCLK_WDIVIDER; \ |
592 | type VGA_TEST_ENABLE; \ | ||
593 | type VGA_TEST_RENDER_START; \ | ||
594 | type D1VGA_MODE_ENABLE; | ||
587 | 595 | ||
588 | struct dce_hwseq_shift { | 596 | struct dce_hwseq_shift { |
589 | HWSEQ_REG_FIELD_LIST(uint8_t) | 597 | HWSEQ_REG_FIELD_LIST(uint8_t) |
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c index a266e3f5e75f..e4741f1a2b01 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c | |||
@@ -82,13 +82,6 @@ | |||
82 | #define DCE110_DIG_FE_SOURCE_SELECT_DIGF 0x20 | 82 | #define DCE110_DIG_FE_SOURCE_SELECT_DIGF 0x20 |
83 | #define DCE110_DIG_FE_SOURCE_SELECT_DIGG 0x40 | 83 | #define DCE110_DIG_FE_SOURCE_SELECT_DIGG 0x40 |
84 | 84 | ||
85 | /* Minimum pixel clock, in KHz. For TMDS signal is 25.00 MHz */ | ||
86 | #define TMDS_MIN_PIXEL_CLOCK 25000 | ||
87 | /* Maximum pixel clock, in KHz. For TMDS signal is 165.00 MHz */ | ||
88 | #define TMDS_MAX_PIXEL_CLOCK 165000 | ||
89 | /* For current ASICs pixel clock - 600MHz */ | ||
90 | #define MAX_ENCODER_CLOCK 600000 | ||
91 | |||
92 | enum { | 85 | enum { |
93 | DP_MST_UPDATE_MAX_RETRY = 50 | 86 | DP_MST_UPDATE_MAX_RETRY = 50 |
94 | }; | 87 | }; |
@@ -683,6 +676,7 @@ void dce110_link_encoder_construct( | |||
683 | { | 676 | { |
684 | struct bp_encoder_cap_info bp_cap_info = {0}; | 677 | struct bp_encoder_cap_info bp_cap_info = {0}; |
685 | const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs; | 678 | const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs; |
679 | enum bp_result result = BP_RESULT_OK; | ||
686 | 680 | ||
687 | enc110->base.funcs = &dce110_lnk_enc_funcs; | 681 | enc110->base.funcs = &dce110_lnk_enc_funcs; |
688 | enc110->base.ctx = init_data->ctx; | 682 | enc110->base.ctx = init_data->ctx; |
@@ -757,15 +751,24 @@ void dce110_link_encoder_construct( | |||
757 | enc110->base.preferred_engine = ENGINE_ID_UNKNOWN; | 751 | enc110->base.preferred_engine = ENGINE_ID_UNKNOWN; |
758 | } | 752 | } |
759 | 753 | ||
754 | /* default to one to mirror Windows behavior */ | ||
755 | enc110->base.features.flags.bits.HDMI_6GB_EN = 1; | ||
756 | |||
757 | result = bp_funcs->get_encoder_cap_info(enc110->base.ctx->dc_bios, | ||
758 | enc110->base.id, &bp_cap_info); | ||
759 | |||
760 | /* Override features with DCE-specific values */ | 760 | /* Override features with DCE-specific values */ |
761 | if (BP_RESULT_OK == bp_funcs->get_encoder_cap_info( | 761 | if (BP_RESULT_OK == result) { |
762 | enc110->base.ctx->dc_bios, enc110->base.id, | ||
763 | &bp_cap_info)) { | ||
764 | enc110->base.features.flags.bits.IS_HBR2_CAPABLE = | 762 | enc110->base.features.flags.bits.IS_HBR2_CAPABLE = |
765 | bp_cap_info.DP_HBR2_EN; | 763 | bp_cap_info.DP_HBR2_EN; |
766 | enc110->base.features.flags.bits.IS_HBR3_CAPABLE = | 764 | enc110->base.features.flags.bits.IS_HBR3_CAPABLE = |
767 | bp_cap_info.DP_HBR3_EN; | 765 | bp_cap_info.DP_HBR3_EN; |
768 | enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN; | 766 | enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN; |
767 | } else { | ||
768 | dm_logger_write(enc110->base.ctx->logger, LOG_WARNING, | ||
769 | "%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n", | ||
770 | __func__, | ||
771 | result); | ||
769 | } | 772 | } |
770 | } | 773 | } |
771 | 774 | ||
@@ -904,8 +907,7 @@ void dce110_link_encoder_enable_tmds_output( | |||
904 | struct link_encoder *enc, | 907 | struct link_encoder *enc, |
905 | enum clock_source_id clock_source, | 908 | enum clock_source_id clock_source, |
906 | enum dc_color_depth color_depth, | 909 | enum dc_color_depth color_depth, |
907 | bool hdmi, | 910 | enum signal_type signal, |
908 | bool dual_link, | ||
909 | uint32_t pixel_clock) | 911 | uint32_t pixel_clock) |
910 | { | 912 | { |
911 | struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); | 913 | struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); |
@@ -919,16 +921,12 @@ void dce110_link_encoder_enable_tmds_output( | |||
919 | cntl.engine_id = enc->preferred_engine; | 921 | cntl.engine_id = enc->preferred_engine; |
920 | cntl.transmitter = enc110->base.transmitter; | 922 | cntl.transmitter = enc110->base.transmitter; |
921 | cntl.pll_id = clock_source; | 923 | cntl.pll_id = clock_source; |
922 | if (hdmi) { | 924 | cntl.signal = signal; |
923 | cntl.signal = SIGNAL_TYPE_HDMI_TYPE_A; | 925 | if (cntl.signal == SIGNAL_TYPE_DVI_DUAL_LINK) |
924 | cntl.lanes_number = 4; | ||
925 | } else if (dual_link) { | ||
926 | cntl.signal = SIGNAL_TYPE_DVI_DUAL_LINK; | ||
927 | cntl.lanes_number = 8; | 926 | cntl.lanes_number = 8; |
928 | } else { | 927 | else |
929 | cntl.signal = SIGNAL_TYPE_DVI_SINGLE_LINK; | ||
930 | cntl.lanes_number = 4; | 928 | cntl.lanes_number = 4; |
931 | } | 929 | |
932 | cntl.hpd_sel = enc110->base.hpd_source; | 930 | cntl.hpd_sel = enc110->base.hpd_source; |
933 | 931 | ||
934 | cntl.pixel_clock = pixel_clock; | 932 | cntl.pixel_clock = pixel_clock; |
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h index 8ca9afe47a2b..0ec3433d34b6 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h | |||
@@ -210,8 +210,7 @@ void dce110_link_encoder_enable_tmds_output( | |||
210 | struct link_encoder *enc, | 210 | struct link_encoder *enc, |
211 | enum clock_source_id clock_source, | 211 | enum clock_source_id clock_source, |
212 | enum dc_color_depth color_depth, | 212 | enum dc_color_depth color_depth, |
213 | bool hdmi, | 213 | enum signal_type signal, |
214 | bool dual_link, | ||
215 | uint32_t pixel_clock); | 214 | uint32_t pixel_clock); |
216 | 215 | ||
217 | /* enables DP PHY output */ | 216 | /* enables DP PHY output */ |
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c index 3ea43e2a9450..442dd2d93618 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c | |||
@@ -852,6 +852,7 @@ static bool construct( | |||
852 | dc->caps.max_downscale_ratio = 200; | 852 | dc->caps.max_downscale_ratio = 200; |
853 | dc->caps.i2c_speed_in_khz = 40; | 853 | dc->caps.i2c_speed_in_khz = 40; |
854 | dc->caps.max_cursor_size = 128; | 854 | dc->caps.max_cursor_size = 128; |
855 | dc->caps.dual_link_dvi = true; | ||
855 | 856 | ||
856 | for (i = 0; i < pool->base.pipe_count; i++) { | 857 | for (i = 0; i < pool->base.pipe_count; i++) { |
857 | pool->base.timing_generators[i] = | 858 | pool->base.timing_generators[i] = |
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 86cdd7b4811f..6f382a3ac90f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | |||
@@ -688,15 +688,22 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx) | |||
688 | struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; | 688 | struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; |
689 | struct dc_link *link = pipe_ctx->stream->sink->link; | 689 | struct dc_link *link = pipe_ctx->stream->sink->link; |
690 | 690 | ||
691 | /* 1. update AVI info frame (HDMI, DP) | 691 | |
692 | * we always need to update info frame | ||
693 | */ | ||
694 | uint32_t active_total_with_borders; | 692 | uint32_t active_total_with_borders; |
695 | uint32_t early_control = 0; | 693 | uint32_t early_control = 0; |
696 | struct timing_generator *tg = pipe_ctx->stream_res.tg; | 694 | struct timing_generator *tg = pipe_ctx->stream_res.tg; |
697 | 695 | ||
698 | /* TODOFPGA may change to hwss.update_info_frame */ | 696 | /* For MST, there are multiply stream go to only one link. |
697 | * connect DIG back_end to front_end while enable_stream and | ||
698 | * disconnect them during disable_stream | ||
699 | * BY this, it is logic clean to separate stream and link */ | ||
700 | link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc, | ||
701 | pipe_ctx->stream_res.stream_enc->id, true); | ||
702 | |||
703 | /* update AVI info frame (HDMI, DP)*/ | ||
704 | /* TODO: FPGA may change to hwss.update_info_frame */ | ||
699 | dce110_update_info_frame(pipe_ctx); | 705 | dce110_update_info_frame(pipe_ctx); |
706 | |||
700 | /* enable early control to avoid corruption on DP monitor*/ | 707 | /* enable early control to avoid corruption on DP monitor*/ |
701 | active_total_with_borders = | 708 | active_total_with_borders = |
702 | timing->h_addressable | 709 | timing->h_addressable |
@@ -717,12 +724,8 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx) | |||
717 | pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.stream_enc); | 724 | pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.stream_enc); |
718 | } | 725 | } |
719 | 726 | ||
720 | /* For MST, there are multiply stream go to only one link. | 727 | |
721 | * connect DIG back_end to front_end while enable_stream and | 728 | |
722 | * disconnect them during disable_stream | ||
723 | * BY this, it is logic clean to separate stream and link */ | ||
724 | link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc, | ||
725 | pipe_ctx->stream_res.stream_enc->id, true); | ||
726 | 729 | ||
727 | } | 730 | } |
728 | 731 | ||
@@ -1690,9 +1693,13 @@ static void apply_min_clocks( | |||
1690 | * Check if FBC can be enabled | 1693 | * Check if FBC can be enabled |
1691 | */ | 1694 | */ |
1692 | static bool should_enable_fbc(struct dc *dc, | 1695 | static bool should_enable_fbc(struct dc *dc, |
1693 | struct dc_state *context) | 1696 | struct dc_state *context, |
1697 | uint32_t *pipe_idx) | ||
1694 | { | 1698 | { |
1695 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[0]; | 1699 | uint32_t i; |
1700 | struct pipe_ctx *pipe_ctx = NULL; | ||
1701 | struct resource_context *res_ctx = &context->res_ctx; | ||
1702 | |||
1696 | 1703 | ||
1697 | ASSERT(dc->fbc_compressor); | 1704 | ASSERT(dc->fbc_compressor); |
1698 | 1705 | ||
@@ -1704,6 +1711,14 @@ static bool should_enable_fbc(struct dc *dc, | |||
1704 | if (context->stream_count != 1) | 1711 | if (context->stream_count != 1) |
1705 | return false; | 1712 | return false; |
1706 | 1713 | ||
1714 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | ||
1715 | if (res_ctx->pipe_ctx[i].stream) { | ||
1716 | pipe_ctx = &res_ctx->pipe_ctx[i]; | ||
1717 | *pipe_idx = i; | ||
1718 | break; | ||
1719 | } | ||
1720 | } | ||
1721 | |||
1707 | /* Only supports eDP */ | 1722 | /* Only supports eDP */ |
1708 | if (pipe_ctx->stream->sink->link->connector_signal != SIGNAL_TYPE_EDP) | 1723 | if (pipe_ctx->stream->sink->link->connector_signal != SIGNAL_TYPE_EDP) |
1709 | return false; | 1724 | return false; |
@@ -1729,11 +1744,14 @@ static bool should_enable_fbc(struct dc *dc, | |||
1729 | static void enable_fbc(struct dc *dc, | 1744 | static void enable_fbc(struct dc *dc, |
1730 | struct dc_state *context) | 1745 | struct dc_state *context) |
1731 | { | 1746 | { |
1732 | if (should_enable_fbc(dc, context)) { | 1747 | uint32_t pipe_idx = 0; |
1748 | |||
1749 | if (should_enable_fbc(dc, context, &pipe_idx)) { | ||
1733 | /* Program GRPH COMPRESSED ADDRESS and PITCH */ | 1750 | /* Program GRPH COMPRESSED ADDRESS and PITCH */ |
1734 | struct compr_addr_and_pitch_params params = {0, 0, 0}; | 1751 | struct compr_addr_and_pitch_params params = {0, 0, 0}; |
1735 | struct compressor *compr = dc->fbc_compressor; | 1752 | struct compressor *compr = dc->fbc_compressor; |
1736 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[0]; | 1753 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx]; |
1754 | |||
1737 | 1755 | ||
1738 | params.source_view_width = pipe_ctx->stream->timing.h_addressable; | 1756 | params.source_view_width = pipe_ctx->stream->timing.h_addressable; |
1739 | params.source_view_height = pipe_ctx->stream->timing.v_addressable; | 1757 | params.source_view_height = pipe_ctx->stream->timing.v_addressable; |
@@ -2915,6 +2933,49 @@ static void program_csc_matrix(struct pipe_ctx *pipe_ctx, | |||
2915 | } | 2933 | } |
2916 | } | 2934 | } |
2917 | 2935 | ||
2936 | void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx) | ||
2937 | { | ||
2938 | struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position; | ||
2939 | struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp; | ||
2940 | struct mem_input *mi = pipe_ctx->plane_res.mi; | ||
2941 | struct dc_cursor_mi_param param = { | ||
2942 | .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz, | ||
2943 | .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz, | ||
2944 | .viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x, | ||
2945 | .viewport_width = pipe_ctx->plane_res.scl_data.viewport.width, | ||
2946 | .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz | ||
2947 | }; | ||
2948 | |||
2949 | if (pipe_ctx->plane_state->address.type | ||
2950 | == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE) | ||
2951 | pos_cpy.enable = false; | ||
2952 | |||
2953 | if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state) | ||
2954 | pos_cpy.enable = false; | ||
2955 | |||
2956 | if (ipp->funcs->ipp_cursor_set_position) | ||
2957 | ipp->funcs->ipp_cursor_set_position(ipp, &pos_cpy, ¶m); | ||
2958 | if (mi->funcs->set_cursor_position) | ||
2959 | mi->funcs->set_cursor_position(mi, &pos_cpy, ¶m); | ||
2960 | } | ||
2961 | |||
2962 | void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx) | ||
2963 | { | ||
2964 | struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes; | ||
2965 | |||
2966 | if (pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes) | ||
2967 | pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes( | ||
2968 | pipe_ctx->plane_res.ipp, attributes); | ||
2969 | |||
2970 | if (pipe_ctx->plane_res.mi->funcs->set_cursor_attributes) | ||
2971 | pipe_ctx->plane_res.mi->funcs->set_cursor_attributes( | ||
2972 | pipe_ctx->plane_res.mi, attributes); | ||
2973 | |||
2974 | if (pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes) | ||
2975 | pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes( | ||
2976 | pipe_ctx->plane_res.xfm, attributes); | ||
2977 | } | ||
2978 | |||
2918 | static void ready_shared_resources(struct dc *dc, struct dc_state *context) {} | 2979 | static void ready_shared_resources(struct dc *dc, struct dc_state *context) {} |
2919 | 2980 | ||
2920 | static void optimize_shared_resources(struct dc *dc) {} | 2981 | static void optimize_shared_resources(struct dc *dc) {} |
@@ -2957,6 +3018,8 @@ static const struct hw_sequencer_funcs dce110_funcs = { | |||
2957 | .edp_backlight_control = hwss_edp_backlight_control, | 3018 | .edp_backlight_control = hwss_edp_backlight_control, |
2958 | .edp_power_control = hwss_edp_power_control, | 3019 | .edp_power_control = hwss_edp_power_control, |
2959 | .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready, | 3020 | .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready, |
3021 | .set_cursor_position = dce110_set_cursor_position, | ||
3022 | .set_cursor_attribute = dce110_set_cursor_attribute | ||
2960 | }; | 3023 | }; |
2961 | 3024 | ||
2962 | void dce110_hw_sequencer_construct(struct dc *dc) | 3025 | void dce110_hw_sequencer_construct(struct dc *dc) |
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c index 7c4779578fb7..00f18c485e1e 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c | |||
@@ -846,6 +846,16 @@ static bool dce110_validate_bandwidth( | |||
846 | return result; | 846 | return result; |
847 | } | 847 | } |
848 | 848 | ||
849 | enum dc_status dce110_validate_plane(const struct dc_plane_state *plane_state, | ||
850 | struct dc_caps *caps) | ||
851 | { | ||
852 | if (((plane_state->dst_rect.width * 2) < plane_state->src_rect.width) || | ||
853 | ((plane_state->dst_rect.height * 2) < plane_state->src_rect.height)) | ||
854 | return DC_FAIL_SURFACE_VALIDATE; | ||
855 | |||
856 | return DC_OK; | ||
857 | } | ||
858 | |||
849 | static bool dce110_validate_surface_sets( | 859 | static bool dce110_validate_surface_sets( |
850 | struct dc_state *context) | 860 | struct dc_state *context) |
851 | { | 861 | { |
@@ -869,6 +879,13 @@ static bool dce110_validate_surface_sets( | |||
869 | plane->src_rect.height > 1080)) | 879 | plane->src_rect.height > 1080)) |
870 | return false; | 880 | return false; |
871 | 881 | ||
882 | /* we don't have the logic to support underlay | ||
883 | * only yet so block the use case where we get | ||
884 | * NV12 plane as top layer | ||
885 | */ | ||
886 | if (j == 0) | ||
887 | return false; | ||
888 | |||
872 | /* irrespective of plane format, | 889 | /* irrespective of plane format, |
873 | * stream should be RGB encoded | 890 | * stream should be RGB encoded |
874 | */ | 891 | */ |
@@ -1021,6 +1038,7 @@ static const struct resource_funcs dce110_res_pool_funcs = { | |||
1021 | .link_enc_create = dce110_link_encoder_create, | 1038 | .link_enc_create = dce110_link_encoder_create, |
1022 | .validate_guaranteed = dce110_validate_guaranteed, | 1039 | .validate_guaranteed = dce110_validate_guaranteed, |
1023 | .validate_bandwidth = dce110_validate_bandwidth, | 1040 | .validate_bandwidth = dce110_validate_bandwidth, |
1041 | .validate_plane = dce110_validate_plane, | ||
1024 | .acquire_idle_pipe_for_layer = dce110_acquire_underlay, | 1042 | .acquire_idle_pipe_for_layer = dce110_acquire_underlay, |
1025 | .add_stream_to_ctx = dce110_add_stream_to_ctx, | 1043 | .add_stream_to_ctx = dce110_add_stream_to_ctx, |
1026 | .validate_global = dce110_validate_global | 1044 | .validate_global = dce110_validate_global |
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c index 663e0a047a4b..98d9cd0109e1 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c | |||
@@ -1103,6 +1103,8 @@ static bool construct( | |||
1103 | dc->caps.max_downscale_ratio = 200; | 1103 | dc->caps.max_downscale_ratio = 200; |
1104 | dc->caps.i2c_speed_in_khz = 100; | 1104 | dc->caps.i2c_speed_in_khz = 100; |
1105 | dc->caps.max_cursor_size = 128; | 1105 | dc->caps.max_cursor_size = 128; |
1106 | dc->caps.dual_link_dvi = true; | ||
1107 | |||
1106 | 1108 | ||
1107 | /************************************************* | 1109 | /************************************************* |
1108 | * Create resources * | 1110 | * Create resources * |
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index 57cd67359567..5aab01db28ee 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c | |||
@@ -835,6 +835,8 @@ static bool construct( | |||
835 | dc->caps.max_downscale_ratio = 200; | 835 | dc->caps.max_downscale_ratio = 200; |
836 | dc->caps.i2c_speed_in_khz = 100; | 836 | dc->caps.i2c_speed_in_khz = 100; |
837 | dc->caps.max_cursor_size = 128; | 837 | dc->caps.max_cursor_size = 128; |
838 | dc->caps.dual_link_dvi = true; | ||
839 | |||
838 | dc->debug = debug_defaults; | 840 | dc->debug = debug_defaults; |
839 | 841 | ||
840 | /************************************************* | 842 | /************************************************* |
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index 8f2bd56f3461..25d7eb1567ae 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c | |||
@@ -793,6 +793,7 @@ static bool dce80_construct( | |||
793 | dc->caps.max_downscale_ratio = 200; | 793 | dc->caps.max_downscale_ratio = 200; |
794 | dc->caps.i2c_speed_in_khz = 40; | 794 | dc->caps.i2c_speed_in_khz = 40; |
795 | dc->caps.max_cursor_size = 128; | 795 | dc->caps.max_cursor_size = 128; |
796 | dc->caps.dual_link_dvi = true; | ||
796 | 797 | ||
797 | /************************************************* | 798 | /************************************************* |
798 | * Create resources * | 799 | * Create resources * |
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 82572863acab..072e4485e85e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | |||
@@ -238,10 +238,24 @@ static void enable_power_gating_plane( | |||
238 | static void disable_vga( | 238 | static void disable_vga( |
239 | struct dce_hwseq *hws) | 239 | struct dce_hwseq *hws) |
240 | { | 240 | { |
241 | unsigned int in_vga_mode = 0; | ||
242 | |||
243 | REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga_mode); | ||
244 | |||
245 | if (in_vga_mode == 0) | ||
246 | return; | ||
247 | |||
241 | REG_WRITE(D1VGA_CONTROL, 0); | 248 | REG_WRITE(D1VGA_CONTROL, 0); |
242 | REG_WRITE(D2VGA_CONTROL, 0); | 249 | |
243 | REG_WRITE(D3VGA_CONTROL, 0); | 250 | /* HW Engineer's Notes: |
244 | REG_WRITE(D4VGA_CONTROL, 0); | 251 | * During switch from vga->extended, if we set the VGA_TEST_ENABLE and |
252 | * then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly. | ||
253 | * | ||
254 | * Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset | ||
255 | * VGA_TEST_ENABLE, to leave it in the same state as before. | ||
256 | */ | ||
257 | REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1); | ||
258 | REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1); | ||
245 | } | 259 | } |
246 | 260 | ||
247 | static void dpp_pg_control( | 261 | static void dpp_pg_control( |
@@ -1761,6 +1775,11 @@ static void update_dchubp_dpp( | |||
1761 | &pipe_ctx->plane_res.scl_data.viewport_c); | 1775 | &pipe_ctx->plane_res.scl_data.viewport_c); |
1762 | } | 1776 | } |
1763 | 1777 | ||
1778 | if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) { | ||
1779 | dc->hwss.set_cursor_position(pipe_ctx); | ||
1780 | dc->hwss.set_cursor_attribute(pipe_ctx); | ||
1781 | } | ||
1782 | |||
1764 | if (plane_state->update_flags.bits.full_update) { | 1783 | if (plane_state->update_flags.bits.full_update) { |
1765 | /*gamut remap*/ | 1784 | /*gamut remap*/ |
1766 | program_gamut_remap(pipe_ctx); | 1785 | program_gamut_remap(pipe_ctx); |
@@ -2296,7 +2315,7 @@ static bool dcn10_dummy_display_power_gating( | |||
2296 | return true; | 2315 | return true; |
2297 | } | 2316 | } |
2298 | 2317 | ||
2299 | void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx) | 2318 | static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx) |
2300 | { | 2319 | { |
2301 | struct dc_plane_state *plane_state = pipe_ctx->plane_state; | 2320 | struct dc_plane_state *plane_state = pipe_ctx->plane_state; |
2302 | struct timing_generator *tg = pipe_ctx->stream_res.tg; | 2321 | struct timing_generator *tg = pipe_ctx->stream_res.tg; |
@@ -2316,12 +2335,46 @@ void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx) | |||
2316 | } | 2335 | } |
2317 | } | 2336 | } |
2318 | 2337 | ||
2319 | void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data) | 2338 | static void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data) |
2320 | { | 2339 | { |
2321 | if (hws->ctx->dc->res_pool->hubbub != NULL) | 2340 | if (hws->ctx->dc->res_pool->hubbub != NULL) |
2322 | hubbub1_update_dchub(hws->ctx->dc->res_pool->hubbub, dh_data); | 2341 | hubbub1_update_dchub(hws->ctx->dc->res_pool->hubbub, dh_data); |
2323 | } | 2342 | } |
2324 | 2343 | ||
2344 | static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) | ||
2345 | { | ||
2346 | struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position; | ||
2347 | struct hubp *hubp = pipe_ctx->plane_res.hubp; | ||
2348 | struct dpp *dpp = pipe_ctx->plane_res.dpp; | ||
2349 | struct dc_cursor_mi_param param = { | ||
2350 | .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz, | ||
2351 | .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz, | ||
2352 | .viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x, | ||
2353 | .viewport_width = pipe_ctx->plane_res.scl_data.viewport.width, | ||
2354 | .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz | ||
2355 | }; | ||
2356 | |||
2357 | if (pipe_ctx->plane_state->address.type | ||
2358 | == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE) | ||
2359 | pos_cpy.enable = false; | ||
2360 | |||
2361 | if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state) | ||
2362 | pos_cpy.enable = false; | ||
2363 | |||
2364 | hubp->funcs->set_cursor_position(hubp, &pos_cpy, ¶m); | ||
2365 | dpp->funcs->set_cursor_position(dpp, &pos_cpy, ¶m, hubp->curs_attr.width); | ||
2366 | } | ||
2367 | |||
2368 | static void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx) | ||
2369 | { | ||
2370 | struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes; | ||
2371 | |||
2372 | pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes( | ||
2373 | pipe_ctx->plane_res.hubp, attributes); | ||
2374 | pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes( | ||
2375 | pipe_ctx->plane_res.dpp, attributes->color_format); | ||
2376 | } | ||
2377 | |||
2325 | static const struct hw_sequencer_funcs dcn10_funcs = { | 2378 | static const struct hw_sequencer_funcs dcn10_funcs = { |
2326 | .program_gamut_remap = program_gamut_remap, | 2379 | .program_gamut_remap = program_gamut_remap, |
2327 | .program_csc_matrix = program_csc_matrix, | 2380 | .program_csc_matrix = program_csc_matrix, |
@@ -2362,6 +2415,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = { | |||
2362 | .edp_backlight_control = hwss_edp_backlight_control, | 2415 | .edp_backlight_control = hwss_edp_backlight_control, |
2363 | .edp_power_control = hwss_edp_power_control, | 2416 | .edp_power_control = hwss_edp_power_control, |
2364 | .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready, | 2417 | .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready, |
2418 | .set_cursor_position = dcn10_set_cursor_position, | ||
2419 | .set_cursor_attribute = dcn10_set_cursor_attribute | ||
2365 | }; | 2420 | }; |
2366 | 2421 | ||
2367 | 2422 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h index 0fd329deacd8..54d8a1386142 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h | |||
@@ -123,8 +123,7 @@ struct link_encoder_funcs { | |||
123 | void (*enable_tmds_output)(struct link_encoder *enc, | 123 | void (*enable_tmds_output)(struct link_encoder *enc, |
124 | enum clock_source_id clock_source, | 124 | enum clock_source_id clock_source, |
125 | enum dc_color_depth color_depth, | 125 | enum dc_color_depth color_depth, |
126 | bool hdmi, | 126 | enum signal_type signal, |
127 | bool dual_link, | ||
128 | uint32_t pixel_clock); | 127 | uint32_t pixel_clock); |
129 | void (*enable_dp_output)(struct link_encoder *enc, | 128 | void (*enable_dp_output)(struct link_encoder *enc, |
130 | const struct dc_link_settings *link_settings, | 129 | const struct dc_link_settings *link_settings, |
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index 4c0aa56f7bae..379c6ecd271a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | |||
@@ -198,6 +198,9 @@ struct hw_sequencer_funcs { | |||
198 | bool enable); | 198 | bool enable); |
199 | void (*edp_wait_for_hpd_ready)(struct dc_link *link, bool power_up); | 199 | void (*edp_wait_for_hpd_ready)(struct dc_link *link, bool power_up); |
200 | 200 | ||
201 | void (*set_cursor_position)(struct pipe_ctx *pipe); | ||
202 | void (*set_cursor_attribute)(struct pipe_ctx *pipe); | ||
203 | |||
201 | }; | 204 | }; |
202 | 205 | ||
203 | void color_space_to_black_color( | 206 | void color_space_to_black_color( |
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c index f7e40b292dfb..d3e1923b01a8 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c | |||
@@ -217,7 +217,7 @@ bool dce110_vblank_set( | |||
217 | core_dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg; | 217 | core_dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg; |
218 | 218 | ||
219 | if (enable) { | 219 | if (enable) { |
220 | if (!tg->funcs->arm_vert_intr(tg, 2)) { | 220 | if (!tg || !tg->funcs->arm_vert_intr(tg, 2)) { |
221 | DC_ERROR("Failed to get VBLANK!\n"); | 221 | DC_ERROR("Failed to get VBLANK!\n"); |
222 | return false; | 222 | return false; |
223 | } | 223 | } |
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c index 57a54a7b89e5..1c079ba37c30 100644 --- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c | |||
@@ -42,8 +42,7 @@ static void virtual_link_encoder_enable_tmds_output( | |||
42 | struct link_encoder *enc, | 42 | struct link_encoder *enc, |
43 | enum clock_source_id clock_source, | 43 | enum clock_source_id clock_source, |
44 | enum dc_color_depth color_depth, | 44 | enum dc_color_depth color_depth, |
45 | bool hdmi, | 45 | enum signal_type signal, |
46 | bool dual_link, | ||
47 | uint32_t pixel_clock) {} | 46 | uint32_t pixel_clock) {} |
48 | 47 | ||
49 | static void virtual_link_encoder_enable_dp_output( | 48 | static void virtual_link_encoder_enable_dp_output( |
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h index 7a9b43f84a31..36bbad594267 100644 --- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h +++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h | |||
@@ -419,11 +419,6 @@ struct bios_event_info { | |||
419 | bool backlight_changed; | 419 | bool backlight_changed; |
420 | }; | 420 | }; |
421 | 421 | ||
422 | enum { | ||
423 | HDMI_PIXEL_CLOCK_IN_KHZ_297 = 297000, | ||
424 | TMDS_PIXEL_CLOCK_IN_KHZ_165 = 165000 | ||
425 | }; | ||
426 | |||
427 | /* | 422 | /* |
428 | * DFS-bypass flag | 423 | * DFS-bypass flag |
429 | */ | 424 | */ |
diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h index b5ebde642207..199c5db67cbc 100644 --- a/drivers/gpu/drm/amd/display/include/signal_types.h +++ b/drivers/gpu/drm/amd/display/include/signal_types.h | |||
@@ -26,6 +26,11 @@ | |||
26 | #ifndef __DC_SIGNAL_TYPES_H__ | 26 | #ifndef __DC_SIGNAL_TYPES_H__ |
27 | #define __DC_SIGNAL_TYPES_H__ | 27 | #define __DC_SIGNAL_TYPES_H__ |
28 | 28 | ||
29 | /* Minimum pixel clock, in KHz. For TMDS signal is 25.00 MHz */ | ||
30 | #define TMDS_MIN_PIXEL_CLOCK 25000 | ||
31 | /* Maximum pixel clock, in KHz. For TMDS signal is 165.00 MHz */ | ||
32 | #define TMDS_MAX_PIXEL_CLOCK 165000 | ||
33 | |||
29 | enum signal_type { | 34 | enum signal_type { |
30 | SIGNAL_TYPE_NONE = 0L, /* no signal */ | 35 | SIGNAL_TYPE_NONE = 0L, /* no signal */ |
31 | SIGNAL_TYPE_DVI_SINGLE_LINK = (1 << 0), | 36 | SIGNAL_TYPE_DVI_SINGLE_LINK = (1 << 0), |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 41e42beff213..08e8a793714f 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | |||
@@ -2756,10 +2756,13 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, | |||
2756 | PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); | 2756 | PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); |
2757 | 2757 | ||
2758 | 2758 | ||
2759 | disable_mclk_switching = ((1 < info.display_count) || | 2759 | if (info.display_count == 0) |
2760 | disable_mclk_switching_for_frame_lock || | 2760 | disable_mclk_switching = false; |
2761 | smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) || | 2761 | else |
2762 | (mode_info.refresh_rate > 120)); | 2762 | disable_mclk_switching = ((1 < info.display_count) || |
2763 | disable_mclk_switching_for_frame_lock || | ||
2764 | smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) || | ||
2765 | (mode_info.refresh_rate > 120)); | ||
2763 | 2766 | ||
2764 | sclk = smu7_ps->performance_levels[0].engine_clock; | 2767 | sclk = smu7_ps->performance_levels[0].engine_clock; |
2765 | mclk = smu7_ps->performance_levels[0].memory_clock; | 2768 | mclk = smu7_ps->performance_levels[0].memory_clock; |
@@ -4534,13 +4537,6 @@ static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr, | |||
4534 | int tmp_result, result = 0; | 4537 | int tmp_result, result = 0; |
4535 | uint32_t sclk_mask = 0, mclk_mask = 0; | 4538 | uint32_t sclk_mask = 0, mclk_mask = 0; |
4536 | 4539 | ||
4537 | if (hwmgr->chip_id == CHIP_FIJI) { | ||
4538 | if (request->type == AMD_PP_GFX_PROFILE) | ||
4539 | smu7_enable_power_containment(hwmgr); | ||
4540 | else if (request->type == AMD_PP_COMPUTE_PROFILE) | ||
4541 | smu7_disable_power_containment(hwmgr); | ||
4542 | } | ||
4543 | |||
4544 | if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_AUTO) | 4540 | if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_AUTO) |
4545 | return -EINVAL; | 4541 | return -EINVAL; |
4546 | 4542 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index 2d55dabc77d4..5f9c3efb532f 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | |||
@@ -3168,10 +3168,13 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, | |||
3168 | disable_mclk_switching_for_vr = PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR); | 3168 | disable_mclk_switching_for_vr = PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR); |
3169 | force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh); | 3169 | force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh); |
3170 | 3170 | ||
3171 | disable_mclk_switching = (info.display_count > 1) || | 3171 | if (info.display_count == 0) |
3172 | disable_mclk_switching_for_frame_lock || | 3172 | disable_mclk_switching = false; |
3173 | disable_mclk_switching_for_vr || | 3173 | else |
3174 | force_mclk_high; | 3174 | disable_mclk_switching = (info.display_count > 1) || |
3175 | disable_mclk_switching_for_frame_lock || | ||
3176 | disable_mclk_switching_for_vr || | ||
3177 | force_mclk_high; | ||
3175 | 3178 | ||
3176 | sclk = vega10_ps->performance_levels[0].gfx_clock; | 3179 | sclk = vega10_ps->performance_levels[0].gfx_clock; |
3177 | mclk = vega10_ps->performance_levels[0].mem_clock; | 3180 | mclk = vega10_ps->performance_levels[0].mem_clock; |
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index 5a13ff29f4f0..c0530a1af5e3 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c | |||
@@ -121,6 +121,10 @@ int drm_mode_addfb(struct drm_device *dev, | |||
121 | r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth); | 121 | r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth); |
122 | r.handles[0] = or->handle; | 122 | r.handles[0] = or->handle; |
123 | 123 | ||
124 | if (r.pixel_format == DRM_FORMAT_XRGB2101010 && | ||
125 | dev->driver->driver_features & DRIVER_PREFER_XBGR_30BPP) | ||
126 | r.pixel_format = DRM_FORMAT_XBGR2101010; | ||
127 | |||
124 | ret = drm_mode_addfb2(dev, &r, file_priv); | 128 | ret = drm_mode_addfb2(dev, &r, file_priv); |
125 | if (ret) | 129 | if (ret) |
126 | return ret; | 130 | return ret; |
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index c8454ac43fae..db6b94dda5df 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c | |||
@@ -471,6 +471,7 @@ struct parser_exec_state { | |||
471 | * used when ret from 2nd level batch buffer | 471 | * used when ret from 2nd level batch buffer |
472 | */ | 472 | */ |
473 | int saved_buf_addr_type; | 473 | int saved_buf_addr_type; |
474 | bool is_ctx_wa; | ||
474 | 475 | ||
475 | struct cmd_info *info; | 476 | struct cmd_info *info; |
476 | 477 | ||
@@ -1715,6 +1716,11 @@ static int perform_bb_shadow(struct parser_exec_state *s) | |||
1715 | bb->accessing = true; | 1716 | bb->accessing = true; |
1716 | bb->bb_start_cmd_va = s->ip_va; | 1717 | bb->bb_start_cmd_va = s->ip_va; |
1717 | 1718 | ||
1719 | if ((s->buf_type == BATCH_BUFFER_INSTRUCTION) && (!s->is_ctx_wa)) | ||
1720 | bb->bb_offset = s->ip_va - s->rb_va; | ||
1721 | else | ||
1722 | bb->bb_offset = 0; | ||
1723 | |||
1718 | /* | 1724 | /* |
1719 | * ip_va saves the virtual address of the shadow batch buffer, while | 1725 | * ip_va saves the virtual address of the shadow batch buffer, while |
1720 | * ip_gma saves the graphics address of the original batch buffer. | 1726 | * ip_gma saves the graphics address of the original batch buffer. |
@@ -2571,6 +2577,7 @@ static int scan_workload(struct intel_vgpu_workload *workload) | |||
2571 | s.ring_tail = gma_tail; | 2577 | s.ring_tail = gma_tail; |
2572 | s.rb_va = workload->shadow_ring_buffer_va; | 2578 | s.rb_va = workload->shadow_ring_buffer_va; |
2573 | s.workload = workload; | 2579 | s.workload = workload; |
2580 | s.is_ctx_wa = false; | ||
2574 | 2581 | ||
2575 | if ((bypass_scan_mask & (1 << workload->ring_id)) || | 2582 | if ((bypass_scan_mask & (1 << workload->ring_id)) || |
2576 | gma_head == gma_tail) | 2583 | gma_head == gma_tail) |
@@ -2624,6 +2631,7 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) | |||
2624 | s.ring_tail = gma_tail; | 2631 | s.ring_tail = gma_tail; |
2625 | s.rb_va = wa_ctx->indirect_ctx.shadow_va; | 2632 | s.rb_va = wa_ctx->indirect_ctx.shadow_va; |
2626 | s.workload = workload; | 2633 | s.workload = workload; |
2634 | s.is_ctx_wa = true; | ||
2627 | 2635 | ||
2628 | if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) { | 2636 | if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) { |
2629 | ret = -EINVAL; | 2637 | ret = -EINVAL; |
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index 256f1bb522b7..152df3d0291e 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c | |||
@@ -394,9 +394,11 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre, | |||
394 | * performace for batch mmio read/write, so we need | 394 | * performace for batch mmio read/write, so we need |
395 | * handle forcewake mannually. | 395 | * handle forcewake mannually. |
396 | */ | 396 | */ |
397 | intel_runtime_pm_get(dev_priv); | ||
397 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | 398 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); |
398 | switch_mmio(pre, next, ring_id); | 399 | switch_mmio(pre, next, ring_id); |
399 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | 400 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); |
401 | intel_runtime_pm_put(dev_priv); | ||
400 | } | 402 | } |
401 | 403 | ||
402 | /** | 404 | /** |
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index b55b3580ca1d..d74d6f05c62c 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c | |||
@@ -52,6 +52,54 @@ static void set_context_pdp_root_pointer( | |||
52 | pdp_pair[i].val = pdp[7 - i]; | 52 | pdp_pair[i].val = pdp[7 - i]; |
53 | } | 53 | } |
54 | 54 | ||
55 | /* | ||
56 | * when populating shadow ctx from guest, we should not overrride oa related | ||
57 | * registers, so that they will not be overlapped by guest oa configs. Thus | ||
58 | * made it possible to capture oa data from host for both host and guests. | ||
59 | */ | ||
60 | static void sr_oa_regs(struct intel_vgpu_workload *workload, | ||
61 | u32 *reg_state, bool save) | ||
62 | { | ||
63 | struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; | ||
64 | u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset; | ||
65 | u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset; | ||
66 | int i = 0; | ||
67 | u32 flex_mmio[] = { | ||
68 | i915_mmio_reg_offset(EU_PERF_CNTL0), | ||
69 | i915_mmio_reg_offset(EU_PERF_CNTL1), | ||
70 | i915_mmio_reg_offset(EU_PERF_CNTL2), | ||
71 | i915_mmio_reg_offset(EU_PERF_CNTL3), | ||
72 | i915_mmio_reg_offset(EU_PERF_CNTL4), | ||
73 | i915_mmio_reg_offset(EU_PERF_CNTL5), | ||
74 | i915_mmio_reg_offset(EU_PERF_CNTL6), | ||
75 | }; | ||
76 | |||
77 | if (!workload || !reg_state || workload->ring_id != RCS) | ||
78 | return; | ||
79 | |||
80 | if (save) { | ||
81 | workload->oactxctrl = reg_state[ctx_oactxctrl + 1]; | ||
82 | |||
83 | for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) { | ||
84 | u32 state_offset = ctx_flexeu0 + i * 2; | ||
85 | |||
86 | workload->flex_mmio[i] = reg_state[state_offset + 1]; | ||
87 | } | ||
88 | } else { | ||
89 | reg_state[ctx_oactxctrl] = | ||
90 | i915_mmio_reg_offset(GEN8_OACTXCONTROL); | ||
91 | reg_state[ctx_oactxctrl + 1] = workload->oactxctrl; | ||
92 | |||
93 | for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) { | ||
94 | u32 state_offset = ctx_flexeu0 + i * 2; | ||
95 | u32 mmio = flex_mmio[i]; | ||
96 | |||
97 | reg_state[state_offset] = mmio; | ||
98 | reg_state[state_offset + 1] = workload->flex_mmio[i]; | ||
99 | } | ||
100 | } | ||
101 | } | ||
102 | |||
55 | static int populate_shadow_context(struct intel_vgpu_workload *workload) | 103 | static int populate_shadow_context(struct intel_vgpu_workload *workload) |
56 | { | 104 | { |
57 | struct intel_vgpu *vgpu = workload->vgpu; | 105 | struct intel_vgpu *vgpu = workload->vgpu; |
@@ -98,6 +146,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) | |||
98 | page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); | 146 | page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); |
99 | shadow_ring_context = kmap(page); | 147 | shadow_ring_context = kmap(page); |
100 | 148 | ||
149 | sr_oa_regs(workload, (u32 *)shadow_ring_context, true); | ||
101 | #define COPY_REG(name) \ | 150 | #define COPY_REG(name) \ |
102 | intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \ | 151 | intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \ |
103 | + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4) | 152 | + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4) |
@@ -122,6 +171,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) | |||
122 | sizeof(*shadow_ring_context), | 171 | sizeof(*shadow_ring_context), |
123 | I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context)); | 172 | I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context)); |
124 | 173 | ||
174 | sr_oa_regs(workload, (u32 *)shadow_ring_context, false); | ||
125 | kunmap(page); | 175 | kunmap(page); |
126 | return 0; | 176 | return 0; |
127 | } | 177 | } |
@@ -376,6 +426,17 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) | |||
376 | goto err; | 426 | goto err; |
377 | } | 427 | } |
378 | 428 | ||
429 | /* For privilge batch buffer and not wa_ctx, the bb_start_cmd_va | ||
430 | * is only updated into ring_scan_buffer, not real ring address | ||
431 | * allocated in later copy_workload_to_ring_buffer. pls be noted | ||
432 | * shadow_ring_buffer_va is now pointed to real ring buffer va | ||
433 | * in copy_workload_to_ring_buffer. | ||
434 | */ | ||
435 | |||
436 | if (bb->bb_offset) | ||
437 | bb->bb_start_cmd_va = workload->shadow_ring_buffer_va | ||
438 | + bb->bb_offset; | ||
439 | |||
379 | /* relocate shadow batch buffer */ | 440 | /* relocate shadow batch buffer */ |
380 | bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma); | 441 | bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma); |
381 | if (gmadr_bytes == 8) | 442 | if (gmadr_bytes == 8) |
@@ -1044,10 +1105,12 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) | |||
1044 | 1105 | ||
1045 | bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES); | 1106 | bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES); |
1046 | 1107 | ||
1047 | s->workloads = kmem_cache_create("gvt-g_vgpu_workload", | 1108 | s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload", |
1048 | sizeof(struct intel_vgpu_workload), 0, | 1109 | sizeof(struct intel_vgpu_workload), 0, |
1049 | SLAB_HWCACHE_ALIGN, | 1110 | SLAB_HWCACHE_ALIGN, |
1050 | NULL); | 1111 | offsetof(struct intel_vgpu_workload, rb_tail), |
1112 | sizeof_field(struct intel_vgpu_workload, rb_tail), | ||
1113 | NULL); | ||
1051 | 1114 | ||
1052 | if (!s->workloads) { | 1115 | if (!s->workloads) { |
1053 | ret = -ENOMEM; | 1116 | ret = -ENOMEM; |
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h index ff175a98b19e..a79a4f60637e 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.h +++ b/drivers/gpu/drm/i915/gvt/scheduler.h | |||
@@ -110,6 +110,10 @@ struct intel_vgpu_workload { | |||
110 | /* shadow batch buffer */ | 110 | /* shadow batch buffer */ |
111 | struct list_head shadow_bb; | 111 | struct list_head shadow_bb; |
112 | struct intel_shadow_wa_ctx wa_ctx; | 112 | struct intel_shadow_wa_ctx wa_ctx; |
113 | |||
114 | /* oa registers */ | ||
115 | u32 oactxctrl; | ||
116 | u32 flex_mmio[7]; | ||
113 | }; | 117 | }; |
114 | 118 | ||
115 | struct intel_vgpu_shadow_bb { | 119 | struct intel_vgpu_shadow_bb { |
@@ -120,6 +124,7 @@ struct intel_vgpu_shadow_bb { | |||
120 | u32 *bb_start_cmd_va; | 124 | u32 *bb_start_cmd_va; |
121 | unsigned int clflush; | 125 | unsigned int clflush; |
122 | bool accessing; | 126 | bool accessing; |
127 | unsigned long bb_offset; | ||
123 | }; | 128 | }; |
124 | 129 | ||
125 | #define workload_q_head(vgpu, ring_id) \ | 130 | #define workload_q_head(vgpu, ring_id) \ |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index dd89abd2263d..6ff5d655c202 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -434,20 +434,28 @@ i915_gem_object_wait_reservation(struct reservation_object *resv, | |||
434 | dma_fence_put(shared[i]); | 434 | dma_fence_put(shared[i]); |
435 | kfree(shared); | 435 | kfree(shared); |
436 | 436 | ||
437 | /* | ||
438 | * If both shared fences and an exclusive fence exist, | ||
439 | * then by construction the shared fences must be later | ||
440 | * than the exclusive fence. If we successfully wait for | ||
441 | * all the shared fences, we know that the exclusive fence | ||
442 | * must all be signaled. If all the shared fences are | ||
443 | * signaled, we can prune the array and recover the | ||
444 | * floating references on the fences/requests. | ||
445 | */ | ||
437 | prune_fences = count && timeout >= 0; | 446 | prune_fences = count && timeout >= 0; |
438 | } else { | 447 | } else { |
439 | excl = reservation_object_get_excl_rcu(resv); | 448 | excl = reservation_object_get_excl_rcu(resv); |
440 | } | 449 | } |
441 | 450 | ||
442 | if (excl && timeout >= 0) { | 451 | if (excl && timeout >= 0) |
443 | timeout = i915_gem_object_wait_fence(excl, flags, timeout, | 452 | timeout = i915_gem_object_wait_fence(excl, flags, timeout, |
444 | rps_client); | 453 | rps_client); |
445 | prune_fences = timeout >= 0; | ||
446 | } | ||
447 | 454 | ||
448 | dma_fence_put(excl); | 455 | dma_fence_put(excl); |
449 | 456 | ||
450 | /* Oportunistically prune the fences iff we know they have *all* been | 457 | /* |
458 | * Opportunistically prune the fences iff we know they have *all* been | ||
451 | * signaled and that the reservation object has not been changed (i.e. | 459 | * signaled and that the reservation object has not been changed (i.e. |
452 | * no new fences have been added). | 460 | * no new fences have been added). |
453 | */ | 461 | */ |
@@ -3205,8 +3213,10 @@ void i915_gem_set_wedged(struct drm_i915_private *i915) | |||
3205 | * rolling the global seqno forward (since this would complete requests | 3213 | * rolling the global seqno forward (since this would complete requests |
3206 | * for which we haven't set the fence error to EIO yet). | 3214 | * for which we haven't set the fence error to EIO yet). |
3207 | */ | 3215 | */ |
3208 | for_each_engine(engine, i915, id) | 3216 | for_each_engine(engine, i915, id) { |
3217 | i915_gem_reset_prepare_engine(engine); | ||
3209 | engine->submit_request = nop_submit_request; | 3218 | engine->submit_request = nop_submit_request; |
3219 | } | ||
3210 | 3220 | ||
3211 | /* | 3221 | /* |
3212 | * Make sure no one is running the old callback before we proceed with | 3222 | * Make sure no one is running the old callback before we proceed with |
@@ -3244,6 +3254,8 @@ void i915_gem_set_wedged(struct drm_i915_private *i915) | |||
3244 | intel_engine_init_global_seqno(engine, | 3254 | intel_engine_init_global_seqno(engine, |
3245 | intel_engine_last_submit(engine)); | 3255 | intel_engine_last_submit(engine)); |
3246 | spin_unlock_irqrestore(&engine->timeline->lock, flags); | 3256 | spin_unlock_irqrestore(&engine->timeline->lock, flags); |
3257 | |||
3258 | i915_gem_reset_finish_engine(engine); | ||
3247 | } | 3259 | } |
3248 | 3260 | ||
3249 | set_bit(I915_WEDGED, &i915->gpu_error.flags); | 3261 | set_bit(I915_WEDGED, &i915->gpu_error.flags); |
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 4401068ff468..3ab1ace2a6bd 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
@@ -505,6 +505,8 @@ eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma) | |||
505 | list_add_tail(&vma->exec_link, &eb->unbound); | 505 | list_add_tail(&vma->exec_link, &eb->unbound); |
506 | if (drm_mm_node_allocated(&vma->node)) | 506 | if (drm_mm_node_allocated(&vma->node)) |
507 | err = i915_vma_unbind(vma); | 507 | err = i915_vma_unbind(vma); |
508 | if (unlikely(err)) | ||
509 | vma->exec_flags = NULL; | ||
508 | } | 510 | } |
509 | return err; | 511 | return err; |
510 | } | 512 | } |
@@ -2410,7 +2412,7 @@ err_request: | |||
2410 | if (out_fence) { | 2412 | if (out_fence) { |
2411 | if (err == 0) { | 2413 | if (err == 0) { |
2412 | fd_install(out_fence_fd, out_fence->file); | 2414 | fd_install(out_fence_fd, out_fence->file); |
2413 | args->rsvd2 &= GENMASK_ULL(0, 31); /* keep in-fence */ | 2415 | args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */ |
2414 | args->rsvd2 |= (u64)out_fence_fd << 32; | 2416 | args->rsvd2 |= (u64)out_fence_fd << 32; |
2415 | out_fence_fd = -1; | 2417 | out_fence_fd = -1; |
2416 | } else { | 2418 | } else { |
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index e09d18df8b7f..a3e93d46316a 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ b/drivers/gpu/drm/i915/i915_gem_request.c | |||
@@ -476,8 +476,6 @@ void __i915_gem_request_submit(struct drm_i915_gem_request *request) | |||
476 | GEM_BUG_ON(!irqs_disabled()); | 476 | GEM_BUG_ON(!irqs_disabled()); |
477 | lockdep_assert_held(&engine->timeline->lock); | 477 | lockdep_assert_held(&engine->timeline->lock); |
478 | 478 | ||
479 | trace_i915_gem_request_execute(request); | ||
480 | |||
481 | /* Transfer from per-context onto the global per-engine timeline */ | 479 | /* Transfer from per-context onto the global per-engine timeline */ |
482 | timeline = engine->timeline; | 480 | timeline = engine->timeline; |
483 | GEM_BUG_ON(timeline == request->timeline); | 481 | GEM_BUG_ON(timeline == request->timeline); |
@@ -501,6 +499,8 @@ void __i915_gem_request_submit(struct drm_i915_gem_request *request) | |||
501 | list_move_tail(&request->link, &timeline->requests); | 499 | list_move_tail(&request->link, &timeline->requests); |
502 | spin_unlock(&request->timeline->lock); | 500 | spin_unlock(&request->timeline->lock); |
503 | 501 | ||
502 | trace_i915_gem_request_execute(request); | ||
503 | |||
504 | wake_up_all(&request->execute); | 504 | wake_up_all(&request->execute); |
505 | } | 505 | } |
506 | 506 | ||
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 0be50e43507d..f8fe5ffcdcff 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c | |||
@@ -1303,9 +1303,8 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream) | |||
1303 | */ | 1303 | */ |
1304 | mutex_lock(&dev_priv->drm.struct_mutex); | 1304 | mutex_lock(&dev_priv->drm.struct_mutex); |
1305 | dev_priv->perf.oa.exclusive_stream = NULL; | 1305 | dev_priv->perf.oa.exclusive_stream = NULL; |
1306 | mutex_unlock(&dev_priv->drm.struct_mutex); | ||
1307 | |||
1308 | dev_priv->perf.oa.ops.disable_metric_set(dev_priv); | 1306 | dev_priv->perf.oa.ops.disable_metric_set(dev_priv); |
1307 | mutex_unlock(&dev_priv->drm.struct_mutex); | ||
1309 | 1308 | ||
1310 | free_oa_buffer(dev_priv); | 1309 | free_oa_buffer(dev_priv); |
1311 | 1310 | ||
@@ -1756,22 +1755,13 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr | |||
1756 | * Note: it's only the RCS/Render context that has any OA state. | 1755 | * Note: it's only the RCS/Render context that has any OA state. |
1757 | */ | 1756 | */ |
1758 | static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, | 1757 | static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, |
1759 | const struct i915_oa_config *oa_config, | 1758 | const struct i915_oa_config *oa_config) |
1760 | bool interruptible) | ||
1761 | { | 1759 | { |
1762 | struct i915_gem_context *ctx; | 1760 | struct i915_gem_context *ctx; |
1763 | int ret; | 1761 | int ret; |
1764 | unsigned int wait_flags = I915_WAIT_LOCKED; | 1762 | unsigned int wait_flags = I915_WAIT_LOCKED; |
1765 | 1763 | ||
1766 | if (interruptible) { | 1764 | lockdep_assert_held(&dev_priv->drm.struct_mutex); |
1767 | ret = i915_mutex_lock_interruptible(&dev_priv->drm); | ||
1768 | if (ret) | ||
1769 | return ret; | ||
1770 | |||
1771 | wait_flags |= I915_WAIT_INTERRUPTIBLE; | ||
1772 | } else { | ||
1773 | mutex_lock(&dev_priv->drm.struct_mutex); | ||
1774 | } | ||
1775 | 1765 | ||
1776 | /* Switch away from any user context. */ | 1766 | /* Switch away from any user context. */ |
1777 | ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config); | 1767 | ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config); |
@@ -1819,8 +1809,6 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, | |||
1819 | } | 1809 | } |
1820 | 1810 | ||
1821 | out: | 1811 | out: |
1822 | mutex_unlock(&dev_priv->drm.struct_mutex); | ||
1823 | |||
1824 | return ret; | 1812 | return ret; |
1825 | } | 1813 | } |
1826 | 1814 | ||
@@ -1863,7 +1851,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv, | |||
1863 | * to make sure all slices/subslices are ON before writing to NOA | 1851 | * to make sure all slices/subslices are ON before writing to NOA |
1864 | * registers. | 1852 | * registers. |
1865 | */ | 1853 | */ |
1866 | ret = gen8_configure_all_contexts(dev_priv, oa_config, true); | 1854 | ret = gen8_configure_all_contexts(dev_priv, oa_config); |
1867 | if (ret) | 1855 | if (ret) |
1868 | return ret; | 1856 | return ret; |
1869 | 1857 | ||
@@ -1878,7 +1866,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv, | |||
1878 | static void gen8_disable_metric_set(struct drm_i915_private *dev_priv) | 1866 | static void gen8_disable_metric_set(struct drm_i915_private *dev_priv) |
1879 | { | 1867 | { |
1880 | /* Reset all contexts' slices/subslices configurations. */ | 1868 | /* Reset all contexts' slices/subslices configurations. */ |
1881 | gen8_configure_all_contexts(dev_priv, NULL, false); | 1869 | gen8_configure_all_contexts(dev_priv, NULL); |
1882 | 1870 | ||
1883 | I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) & | 1871 | I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) & |
1884 | ~GT_NOA_ENABLE)); | 1872 | ~GT_NOA_ENABLE)); |
@@ -1888,7 +1876,7 @@ static void gen8_disable_metric_set(struct drm_i915_private *dev_priv) | |||
1888 | static void gen10_disable_metric_set(struct drm_i915_private *dev_priv) | 1876 | static void gen10_disable_metric_set(struct drm_i915_private *dev_priv) |
1889 | { | 1877 | { |
1890 | /* Reset all contexts' slices/subslices configurations. */ | 1878 | /* Reset all contexts' slices/subslices configurations. */ |
1891 | gen8_configure_all_contexts(dev_priv, NULL, false); | 1879 | gen8_configure_all_contexts(dev_priv, NULL); |
1892 | 1880 | ||
1893 | /* Make sure we disable noa to save power. */ | 1881 | /* Make sure we disable noa to save power. */ |
1894 | I915_WRITE(RPM_CONFIG1, | 1882 | I915_WRITE(RPM_CONFIG1, |
@@ -2138,6 +2126,10 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, | |||
2138 | if (ret) | 2126 | if (ret) |
2139 | goto err_oa_buf_alloc; | 2127 | goto err_oa_buf_alloc; |
2140 | 2128 | ||
2129 | ret = i915_mutex_lock_interruptible(&dev_priv->drm); | ||
2130 | if (ret) | ||
2131 | goto err_lock; | ||
2132 | |||
2141 | ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv, | 2133 | ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv, |
2142 | stream->oa_config); | 2134 | stream->oa_config); |
2143 | if (ret) | 2135 | if (ret) |
@@ -2145,23 +2137,17 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, | |||
2145 | 2137 | ||
2146 | stream->ops = &i915_oa_stream_ops; | 2138 | stream->ops = &i915_oa_stream_ops; |
2147 | 2139 | ||
2148 | /* Lock device for exclusive_stream access late because | ||
2149 | * enable_metric_set() might lock as well on gen8+. | ||
2150 | */ | ||
2151 | ret = i915_mutex_lock_interruptible(&dev_priv->drm); | ||
2152 | if (ret) | ||
2153 | goto err_lock; | ||
2154 | |||
2155 | dev_priv->perf.oa.exclusive_stream = stream; | 2140 | dev_priv->perf.oa.exclusive_stream = stream; |
2156 | 2141 | ||
2157 | mutex_unlock(&dev_priv->drm.struct_mutex); | 2142 | mutex_unlock(&dev_priv->drm.struct_mutex); |
2158 | 2143 | ||
2159 | return 0; | 2144 | return 0; |
2160 | 2145 | ||
2161 | err_lock: | 2146 | err_enable: |
2162 | dev_priv->perf.oa.ops.disable_metric_set(dev_priv); | 2147 | dev_priv->perf.oa.ops.disable_metric_set(dev_priv); |
2148 | mutex_unlock(&dev_priv->drm.struct_mutex); | ||
2163 | 2149 | ||
2164 | err_enable: | 2150 | err_lock: |
2165 | free_oa_buffer(dev_priv); | 2151 | free_oa_buffer(dev_priv); |
2166 | 2152 | ||
2167 | err_oa_buf_alloc: | 2153 | err_oa_buf_alloc: |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index a2108e35c599..33eb0c5b1d32 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -2027,7 +2027,7 @@ enum i915_power_well_id { | |||
2027 | #define _CNL_PORT_TX_DW5_LN0_AE 0x162454 | 2027 | #define _CNL_PORT_TX_DW5_LN0_AE 0x162454 |
2028 | #define _CNL_PORT_TX_DW5_LN0_B 0x162654 | 2028 | #define _CNL_PORT_TX_DW5_LN0_B 0x162654 |
2029 | #define _CNL_PORT_TX_DW5_LN0_C 0x162C54 | 2029 | #define _CNL_PORT_TX_DW5_LN0_C 0x162C54 |
2030 | #define _CNL_PORT_TX_DW5_LN0_D 0x162ED4 | 2030 | #define _CNL_PORT_TX_DW5_LN0_D 0x162E54 |
2031 | #define _CNL_PORT_TX_DW5_LN0_F 0x162854 | 2031 | #define _CNL_PORT_TX_DW5_LN0_F 0x162854 |
2032 | #define CNL_PORT_TX_DW5_GRP(port) _MMIO_PORT6(port, \ | 2032 | #define CNL_PORT_TX_DW5_GRP(port) _MMIO_PORT6(port, \ |
2033 | _CNL_PORT_TX_DW5_GRP_AE, \ | 2033 | _CNL_PORT_TX_DW5_GRP_AE, \ |
@@ -2058,7 +2058,7 @@ enum i915_power_well_id { | |||
2058 | #define _CNL_PORT_TX_DW7_LN0_AE 0x16245C | 2058 | #define _CNL_PORT_TX_DW7_LN0_AE 0x16245C |
2059 | #define _CNL_PORT_TX_DW7_LN0_B 0x16265C | 2059 | #define _CNL_PORT_TX_DW7_LN0_B 0x16265C |
2060 | #define _CNL_PORT_TX_DW7_LN0_C 0x162C5C | 2060 | #define _CNL_PORT_TX_DW7_LN0_C 0x162C5C |
2061 | #define _CNL_PORT_TX_DW7_LN0_D 0x162EDC | 2061 | #define _CNL_PORT_TX_DW7_LN0_D 0x162E5C |
2062 | #define _CNL_PORT_TX_DW7_LN0_F 0x16285C | 2062 | #define _CNL_PORT_TX_DW7_LN0_F 0x16285C |
2063 | #define CNL_PORT_TX_DW7_GRP(port) _MMIO_PORT6(port, \ | 2063 | #define CNL_PORT_TX_DW7_GRP(port) _MMIO_PORT6(port, \ |
2064 | _CNL_PORT_TX_DW7_GRP_AE, \ | 2064 | _CNL_PORT_TX_DW7_GRP_AE, \ |
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index b33d2158c234..e5e6f6bb2b05 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c | |||
@@ -304,8 +304,9 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev, | |||
304 | { | 304 | { |
305 | struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); | 305 | struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); |
306 | struct intel_rps *rps = &dev_priv->gt_pm.rps; | 306 | struct intel_rps *rps = &dev_priv->gt_pm.rps; |
307 | u32 val; | 307 | bool boost = false; |
308 | ssize_t ret; | 308 | ssize_t ret; |
309 | u32 val; | ||
309 | 310 | ||
310 | ret = kstrtou32(buf, 0, &val); | 311 | ret = kstrtou32(buf, 0, &val); |
311 | if (ret) | 312 | if (ret) |
@@ -317,8 +318,13 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev, | |||
317 | return -EINVAL; | 318 | return -EINVAL; |
318 | 319 | ||
319 | mutex_lock(&dev_priv->pcu_lock); | 320 | mutex_lock(&dev_priv->pcu_lock); |
320 | rps->boost_freq = val; | 321 | if (val != rps->boost_freq) { |
322 | rps->boost_freq = val; | ||
323 | boost = atomic_read(&rps->num_waiters); | ||
324 | } | ||
321 | mutex_unlock(&dev_priv->pcu_lock); | 325 | mutex_unlock(&dev_priv->pcu_lock); |
326 | if (boost) | ||
327 | schedule_work(&rps->work); | ||
322 | 328 | ||
323 | return count; | 329 | return count; |
324 | } | 330 | } |
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index 522d54fecb53..4a01f62a392d 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c | |||
@@ -779,11 +779,11 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv, | |||
779 | { | 779 | { |
780 | struct intel_encoder *encoder; | 780 | struct intel_encoder *encoder; |
781 | 781 | ||
782 | if (WARN_ON(pipe >= ARRAY_SIZE(dev_priv->av_enc_map))) | ||
783 | return NULL; | ||
784 | |||
785 | /* MST */ | 782 | /* MST */ |
786 | if (pipe >= 0) { | 783 | if (pipe >= 0) { |
784 | if (WARN_ON(pipe >= ARRAY_SIZE(dev_priv->av_enc_map))) | ||
785 | return NULL; | ||
786 | |||
787 | encoder = dev_priv->av_enc_map[pipe]; | 787 | encoder = dev_priv->av_enc_map[pipe]; |
788 | /* | 788 | /* |
789 | * when bootup, audio driver may not know it is | 789 | * when bootup, audio driver may not know it is |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 35c5299feab6..a29868cd30c7 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -620,19 +620,15 @@ static int | |||
620 | bxt_power_sequencer_idx(struct intel_dp *intel_dp) | 620 | bxt_power_sequencer_idx(struct intel_dp *intel_dp) |
621 | { | 621 | { |
622 | struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); | 622 | struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); |
623 | int backlight_controller = dev_priv->vbt.backlight.controller; | ||
623 | 624 | ||
624 | lockdep_assert_held(&dev_priv->pps_mutex); | 625 | lockdep_assert_held(&dev_priv->pps_mutex); |
625 | 626 | ||
626 | /* We should never land here with regular DP ports */ | 627 | /* We should never land here with regular DP ports */ |
627 | WARN_ON(!intel_dp_is_edp(intel_dp)); | 628 | WARN_ON(!intel_dp_is_edp(intel_dp)); |
628 | 629 | ||
629 | /* | ||
630 | * TODO: BXT has 2 PPS instances. The correct port->PPS instance | ||
631 | * mapping needs to be retrieved from VBT, for now just hard-code to | ||
632 | * use instance #0 always. | ||
633 | */ | ||
634 | if (!intel_dp->pps_reset) | 630 | if (!intel_dp->pps_reset) |
635 | return 0; | 631 | return backlight_controller; |
636 | 632 | ||
637 | intel_dp->pps_reset = false; | 633 | intel_dp->pps_reset = false; |
638 | 634 | ||
@@ -642,7 +638,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp) | |||
642 | */ | 638 | */ |
643 | intel_dp_init_panel_power_sequencer_registers(intel_dp, false); | 639 | intel_dp_init_panel_power_sequencer_registers(intel_dp, false); |
644 | 640 | ||
645 | return 0; | 641 | return backlight_controller; |
646 | } | 642 | } |
647 | 643 | ||
648 | typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv, | 644 | typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv, |
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 7ece2f061b9e..e0fca035ff78 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c | |||
@@ -719,6 +719,8 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) | |||
719 | struct rb_node *rb; | 719 | struct rb_node *rb; |
720 | unsigned long flags; | 720 | unsigned long flags; |
721 | 721 | ||
722 | GEM_TRACE("%s\n", engine->name); | ||
723 | |||
722 | spin_lock_irqsave(&engine->timeline->lock, flags); | 724 | spin_lock_irqsave(&engine->timeline->lock, flags); |
723 | 725 | ||
724 | /* Cancel the requests on the HW and clear the ELSP tracker. */ | 726 | /* Cancel the requests on the HW and clear the ELSP tracker. */ |
@@ -765,6 +767,9 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) | |||
765 | */ | 767 | */ |
766 | clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); | 768 | clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); |
767 | 769 | ||
770 | /* Mark all CS interrupts as complete */ | ||
771 | execlists->active = 0; | ||
772 | |||
768 | spin_unlock_irqrestore(&engine->timeline->lock, flags); | 773 | spin_unlock_irqrestore(&engine->timeline->lock, flags); |
769 | } | 774 | } |
770 | 775 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c index 380f340204e8..debbbf0fd4bd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c | |||
@@ -134,7 +134,7 @@ nv50_get_intensity(struct backlight_device *bd) | |||
134 | struct nouveau_encoder *nv_encoder = bl_get_data(bd); | 134 | struct nouveau_encoder *nv_encoder = bl_get_data(bd); |
135 | struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); | 135 | struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); |
136 | struct nvif_object *device = &drm->client.device.object; | 136 | struct nvif_object *device = &drm->client.device.object; |
137 | int or = nv_encoder->or; | 137 | int or = ffs(nv_encoder->dcb->or) - 1; |
138 | u32 div = 1025; | 138 | u32 div = 1025; |
139 | u32 val; | 139 | u32 val; |
140 | 140 | ||
@@ -149,7 +149,7 @@ nv50_set_intensity(struct backlight_device *bd) | |||
149 | struct nouveau_encoder *nv_encoder = bl_get_data(bd); | 149 | struct nouveau_encoder *nv_encoder = bl_get_data(bd); |
150 | struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); | 150 | struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); |
151 | struct nvif_object *device = &drm->client.device.object; | 151 | struct nvif_object *device = &drm->client.device.object; |
152 | int or = nv_encoder->or; | 152 | int or = ffs(nv_encoder->dcb->or) - 1; |
153 | u32 div = 1025; | 153 | u32 div = 1025; |
154 | u32 val = (bd->props.brightness * div) / 100; | 154 | u32 val = (bd->props.brightness * div) / 100; |
155 | 155 | ||
@@ -170,7 +170,7 @@ nva3_get_intensity(struct backlight_device *bd) | |||
170 | struct nouveau_encoder *nv_encoder = bl_get_data(bd); | 170 | struct nouveau_encoder *nv_encoder = bl_get_data(bd); |
171 | struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); | 171 | struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); |
172 | struct nvif_object *device = &drm->client.device.object; | 172 | struct nvif_object *device = &drm->client.device.object; |
173 | int or = nv_encoder->or; | 173 | int or = ffs(nv_encoder->dcb->or) - 1; |
174 | u32 div, val; | 174 | u32 div, val; |
175 | 175 | ||
176 | div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or)); | 176 | div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or)); |
@@ -188,7 +188,7 @@ nva3_set_intensity(struct backlight_device *bd) | |||
188 | struct nouveau_encoder *nv_encoder = bl_get_data(bd); | 188 | struct nouveau_encoder *nv_encoder = bl_get_data(bd); |
189 | struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); | 189 | struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); |
190 | struct nvif_object *device = &drm->client.device.object; | 190 | struct nvif_object *device = &drm->client.device.object; |
191 | int or = nv_encoder->or; | 191 | int or = ffs(nv_encoder->dcb->or) - 1; |
192 | u32 div, val; | 192 | u32 div, val; |
193 | 193 | ||
194 | div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or)); | 194 | div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or)); |
@@ -228,7 +228,7 @@ nv50_backlight_init(struct drm_connector *connector) | |||
228 | return -ENODEV; | 228 | return -ENODEV; |
229 | } | 229 | } |
230 | 230 | ||
231 | if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or))) | 231 | if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(ffs(nv_encoder->dcb->or) - 1))) |
232 | return 0; | 232 | return 0; |
233 | 233 | ||
234 | if (drm->client.device.info.chipset <= 0xa0 || | 234 | if (drm->client.device.info.chipset <= 0xa0 || |
@@ -268,13 +268,13 @@ nouveau_backlight_init(struct drm_device *dev) | |||
268 | struct nvif_device *device = &drm->client.device; | 268 | struct nvif_device *device = &drm->client.device; |
269 | struct drm_connector *connector; | 269 | struct drm_connector *connector; |
270 | 270 | ||
271 | INIT_LIST_HEAD(&drm->bl_connectors); | ||
272 | |||
271 | if (apple_gmux_present()) { | 273 | if (apple_gmux_present()) { |
272 | NV_INFO(drm, "Apple GMUX detected: not registering Nouveau backlight interface\n"); | 274 | NV_INFO(drm, "Apple GMUX detected: not registering Nouveau backlight interface\n"); |
273 | return 0; | 275 | return 0; |
274 | } | 276 | } |
275 | 277 | ||
276 | INIT_LIST_HEAD(&drm->bl_connectors); | ||
277 | |||
278 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 278 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
279 | if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && | 279 | if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && |
280 | connector->connector_type != DRM_MODE_CONNECTOR_eDP) | 280 | connector->connector_type != DRM_MODE_CONNECTOR_eDP) |
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index dd8d4352ed99..caddce88d2d8 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c | |||
@@ -4477,6 +4477,7 @@ nv50_display_create(struct drm_device *dev) | |||
4477 | nouveau_display(dev)->fini = nv50_display_fini; | 4477 | nouveau_display(dev)->fini = nv50_display_fini; |
4478 | disp->disp = &nouveau_display(dev)->disp; | 4478 | disp->disp = &nouveau_display(dev)->disp; |
4479 | dev->mode_config.funcs = &nv50_disp_func; | 4479 | dev->mode_config.funcs = &nv50_disp_func; |
4480 | dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP; | ||
4480 | if (nouveau_atomic) | 4481 | if (nouveau_atomic) |
4481 | dev->driver->driver_features |= DRIVER_ATOMIC; | 4482 | dev->driver->driver_features |= DRIVER_ATOMIC; |
4482 | 4483 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c index 93946dcee319..1c12e58f44c2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c | |||
@@ -1354,7 +1354,7 @@ nvkm_vmm_get_locked(struct nvkm_vmm *vmm, bool getref, bool mapref, bool sparse, | |||
1354 | 1354 | ||
1355 | tail = this->addr + this->size; | 1355 | tail = this->addr + this->size; |
1356 | if (vmm->func->page_block && next && next->page != p) | 1356 | if (vmm->func->page_block && next && next->page != p) |
1357 | tail = ALIGN_DOWN(addr, vmm->func->page_block); | 1357 | tail = ALIGN_DOWN(tail, vmm->func->page_block); |
1358 | 1358 | ||
1359 | if (addr <= tail && tail - addr >= size) { | 1359 | if (addr <= tail && tail - addr >= size) { |
1360 | rb_erase(&this->tree, &vmm->free); | 1360 | rb_erase(&this->tree, &vmm->free); |
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index d3045a371a55..7c73bc7e2f85 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c | |||
@@ -3221,35 +3221,8 @@ static void cik_gpu_init(struct radeon_device *rdev) | |||
3221 | case CHIP_KAVERI: | 3221 | case CHIP_KAVERI: |
3222 | rdev->config.cik.max_shader_engines = 1; | 3222 | rdev->config.cik.max_shader_engines = 1; |
3223 | rdev->config.cik.max_tile_pipes = 4; | 3223 | rdev->config.cik.max_tile_pipes = 4; |
3224 | if ((rdev->pdev->device == 0x1304) || | 3224 | rdev->config.cik.max_cu_per_sh = 8; |
3225 | (rdev->pdev->device == 0x1305) || | 3225 | rdev->config.cik.max_backends_per_se = 2; |
3226 | (rdev->pdev->device == 0x130C) || | ||
3227 | (rdev->pdev->device == 0x130F) || | ||
3228 | (rdev->pdev->device == 0x1310) || | ||
3229 | (rdev->pdev->device == 0x1311) || | ||
3230 | (rdev->pdev->device == 0x131C)) { | ||
3231 | rdev->config.cik.max_cu_per_sh = 8; | ||
3232 | rdev->config.cik.max_backends_per_se = 2; | ||
3233 | } else if ((rdev->pdev->device == 0x1309) || | ||
3234 | (rdev->pdev->device == 0x130A) || | ||
3235 | (rdev->pdev->device == 0x130D) || | ||
3236 | (rdev->pdev->device == 0x1313) || | ||
3237 | (rdev->pdev->device == 0x131D)) { | ||
3238 | rdev->config.cik.max_cu_per_sh = 6; | ||
3239 | rdev->config.cik.max_backends_per_se = 2; | ||
3240 | } else if ((rdev->pdev->device == 0x1306) || | ||
3241 | (rdev->pdev->device == 0x1307) || | ||
3242 | (rdev->pdev->device == 0x130B) || | ||
3243 | (rdev->pdev->device == 0x130E) || | ||
3244 | (rdev->pdev->device == 0x1315) || | ||
3245 | (rdev->pdev->device == 0x1318) || | ||
3246 | (rdev->pdev->device == 0x131B)) { | ||
3247 | rdev->config.cik.max_cu_per_sh = 4; | ||
3248 | rdev->config.cik.max_backends_per_se = 1; | ||
3249 | } else { | ||
3250 | rdev->config.cik.max_cu_per_sh = 3; | ||
3251 | rdev->config.cik.max_backends_per_se = 1; | ||
3252 | } | ||
3253 | rdev->config.cik.max_sh_per_se = 1; | 3226 | rdev->config.cik.max_sh_per_se = 1; |
3254 | rdev->config.cik.max_texture_channel_caches = 4; | 3227 | rdev->config.cik.max_texture_channel_caches = 4; |
3255 | rdev->config.cik.max_gprs = 256; | 3228 | rdev->config.cik.max_gprs = 256; |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 8d3e3d2e0090..7828a5e10629 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -1365,6 +1365,10 @@ int radeon_device_init(struct radeon_device *rdev, | |||
1365 | if ((rdev->flags & RADEON_IS_PCI) && | 1365 | if ((rdev->flags & RADEON_IS_PCI) && |
1366 | (rdev->family <= CHIP_RS740)) | 1366 | (rdev->family <= CHIP_RS740)) |
1367 | rdev->need_dma32 = true; | 1367 | rdev->need_dma32 = true; |
1368 | #ifdef CONFIG_PPC64 | ||
1369 | if (rdev->family == CHIP_CEDAR) | ||
1370 | rdev->need_dma32 = true; | ||
1371 | #endif | ||
1368 | 1372 | ||
1369 | dma_bits = rdev->need_dma32 ? 32 : 40; | 1373 | dma_bits = rdev->need_dma32 ? 32 : 40; |
1370 | r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits)); | 1374 | r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits)); |
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index a9962ffba720..27d8e7dd2d06 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c | |||
@@ -34,8 +34,6 @@ void radeon_gem_object_free(struct drm_gem_object *gobj) | |||
34 | struct radeon_bo *robj = gem_to_radeon_bo(gobj); | 34 | struct radeon_bo *robj = gem_to_radeon_bo(gobj); |
35 | 35 | ||
36 | if (robj) { | 36 | if (robj) { |
37 | if (robj->gem_base.import_attach) | ||
38 | drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg); | ||
39 | radeon_mn_unregister(robj); | 37 | radeon_mn_unregister(robj); |
40 | radeon_bo_unref(&robj); | 38 | radeon_bo_unref(&robj); |
41 | } | 39 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 15404af9d740..31f5ad605e59 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
@@ -82,6 +82,8 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo) | |||
82 | mutex_unlock(&bo->rdev->gem.mutex); | 82 | mutex_unlock(&bo->rdev->gem.mutex); |
83 | radeon_bo_clear_surface_reg(bo); | 83 | radeon_bo_clear_surface_reg(bo); |
84 | WARN_ON_ONCE(!list_empty(&bo->va)); | 84 | WARN_ON_ONCE(!list_empty(&bo->va)); |
85 | if (bo->gem_base.import_attach) | ||
86 | drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg); | ||
85 | drm_gem_object_release(&bo->gem_base); | 87 | drm_gem_object_release(&bo->gem_base); |
86 | kfree(bo); | 88 | kfree(bo); |
87 | } | 89 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 326ad068c15a..4b6542538ff9 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -47,7 +47,6 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev); | |||
47 | static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish); | 47 | static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish); |
48 | static void radeon_pm_update_profile(struct radeon_device *rdev); | 48 | static void radeon_pm_update_profile(struct radeon_device *rdev); |
49 | static void radeon_pm_set_clocks(struct radeon_device *rdev); | 49 | static void radeon_pm_set_clocks(struct radeon_device *rdev); |
50 | static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev); | ||
51 | 50 | ||
52 | int radeon_pm_get_type_index(struct radeon_device *rdev, | 51 | int radeon_pm_get_type_index(struct radeon_device *rdev, |
53 | enum radeon_pm_state_type ps_type, | 52 | enum radeon_pm_state_type ps_type, |
@@ -80,8 +79,6 @@ void radeon_pm_acpi_event_handler(struct radeon_device *rdev) | |||
80 | radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power); | 79 | radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power); |
81 | } | 80 | } |
82 | mutex_unlock(&rdev->pm.mutex); | 81 | mutex_unlock(&rdev->pm.mutex); |
83 | /* allow new DPM state to be picked */ | ||
84 | radeon_pm_compute_clocks_dpm(rdev); | ||
85 | } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) { | 82 | } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) { |
86 | if (rdev->pm.profile == PM_PROFILE_AUTO) { | 83 | if (rdev->pm.profile == PM_PROFILE_AUTO) { |
87 | mutex_lock(&rdev->pm.mutex); | 84 | mutex_lock(&rdev->pm.mutex); |
@@ -885,8 +882,7 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev, | |||
885 | dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF; | 882 | dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF; |
886 | /* balanced states don't exist at the moment */ | 883 | /* balanced states don't exist at the moment */ |
887 | if (dpm_state == POWER_STATE_TYPE_BALANCED) | 884 | if (dpm_state == POWER_STATE_TYPE_BALANCED) |
888 | dpm_state = rdev->pm.dpm.ac_power ? | 885 | dpm_state = POWER_STATE_TYPE_PERFORMANCE; |
889 | POWER_STATE_TYPE_PERFORMANCE : POWER_STATE_TYPE_BATTERY; | ||
890 | 886 | ||
891 | restart_search: | 887 | restart_search: |
892 | /* Pick the best power state based on current conditions */ | 888 | /* Pick the best power state based on current conditions */ |
diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c index 5decae0069d0..78cbc3145e44 100644 --- a/drivers/gpu/drm/sun4i/sun4i_crtc.c +++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c | |||
@@ -93,6 +93,8 @@ static void sun4i_crtc_atomic_disable(struct drm_crtc *crtc, | |||
93 | 93 | ||
94 | DRM_DEBUG_DRIVER("Disabling the CRTC\n"); | 94 | DRM_DEBUG_DRIVER("Disabling the CRTC\n"); |
95 | 95 | ||
96 | drm_crtc_vblank_off(crtc); | ||
97 | |||
96 | sun4i_tcon_set_status(scrtc->tcon, encoder, false); | 98 | sun4i_tcon_set_status(scrtc->tcon, encoder, false); |
97 | 99 | ||
98 | if (crtc->state->event && !crtc->state->active) { | 100 | if (crtc->state->event && !crtc->state->active) { |
@@ -113,6 +115,8 @@ static void sun4i_crtc_atomic_enable(struct drm_crtc *crtc, | |||
113 | DRM_DEBUG_DRIVER("Enabling the CRTC\n"); | 115 | DRM_DEBUG_DRIVER("Enabling the CRTC\n"); |
114 | 116 | ||
115 | sun4i_tcon_set_status(scrtc->tcon, encoder, true); | 117 | sun4i_tcon_set_status(scrtc->tcon, encoder, true); |
118 | |||
119 | drm_crtc_vblank_on(crtc); | ||
116 | } | 120 | } |
117 | 121 | ||
118 | static void sun4i_crtc_mode_set_nofb(struct drm_crtc *crtc) | 122 | static void sun4i_crtc_mode_set_nofb(struct drm_crtc *crtc) |
diff --git a/drivers/gpu/drm/sun4i/sun4i_dotclock.c b/drivers/gpu/drm/sun4i/sun4i_dotclock.c index 023f39bda633..e36004fbe453 100644 --- a/drivers/gpu/drm/sun4i/sun4i_dotclock.c +++ b/drivers/gpu/drm/sun4i/sun4i_dotclock.c | |||
@@ -132,10 +132,13 @@ static int sun4i_dclk_get_phase(struct clk_hw *hw) | |||
132 | static int sun4i_dclk_set_phase(struct clk_hw *hw, int degrees) | 132 | static int sun4i_dclk_set_phase(struct clk_hw *hw, int degrees) |
133 | { | 133 | { |
134 | struct sun4i_dclk *dclk = hw_to_dclk(hw); | 134 | struct sun4i_dclk *dclk = hw_to_dclk(hw); |
135 | u32 val = degrees / 120; | ||
136 | |||
137 | val <<= 28; | ||
135 | 138 | ||
136 | regmap_update_bits(dclk->regmap, SUN4I_TCON0_IO_POL_REG, | 139 | regmap_update_bits(dclk->regmap, SUN4I_TCON0_IO_POL_REG, |
137 | GENMASK(29, 28), | 140 | GENMASK(29, 28), |
138 | degrees / 120); | 141 | val); |
139 | 142 | ||
140 | return 0; | 143 | return 0; |
141 | } | 144 | } |
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c index 832f8f9bc47f..b8da5a50a61d 100644 --- a/drivers/gpu/drm/sun4i/sun4i_rgb.c +++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c | |||
@@ -92,6 +92,8 @@ static int sun4i_rgb_mode_valid(struct drm_connector *connector, | |||
92 | 92 | ||
93 | DRM_DEBUG_DRIVER("Vertical parameters OK\n"); | 93 | DRM_DEBUG_DRIVER("Vertical parameters OK\n"); |
94 | 94 | ||
95 | tcon->dclk_min_div = 6; | ||
96 | tcon->dclk_max_div = 127; | ||
95 | rounded_rate = clk_round_rate(tcon->dclk, rate); | 97 | rounded_rate = clk_round_rate(tcon->dclk, rate); |
96 | if (rounded_rate < rate) | 98 | if (rounded_rate < rate) |
97 | return MODE_CLOCK_LOW; | 99 | return MODE_CLOCK_LOW; |
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index 3c15cf24b503..2de586b7c98b 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c | |||
@@ -101,10 +101,12 @@ static void sun4i_tcon_channel_set_status(struct sun4i_tcon *tcon, int channel, | |||
101 | return; | 101 | return; |
102 | } | 102 | } |
103 | 103 | ||
104 | if (enabled) | 104 | if (enabled) { |
105 | clk_prepare_enable(clk); | 105 | clk_prepare_enable(clk); |
106 | else | 106 | } else { |
107 | clk_rate_exclusive_put(clk); | ||
107 | clk_disable_unprepare(clk); | 108 | clk_disable_unprepare(clk); |
109 | } | ||
108 | } | 110 | } |
109 | 111 | ||
110 | static void sun4i_tcon_lvds_set_status(struct sun4i_tcon *tcon, | 112 | static void sun4i_tcon_lvds_set_status(struct sun4i_tcon *tcon, |
@@ -260,7 +262,7 @@ static void sun4i_tcon0_mode_set_common(struct sun4i_tcon *tcon, | |||
260 | const struct drm_display_mode *mode) | 262 | const struct drm_display_mode *mode) |
261 | { | 263 | { |
262 | /* Configure the dot clock */ | 264 | /* Configure the dot clock */ |
263 | clk_set_rate(tcon->dclk, mode->crtc_clock * 1000); | 265 | clk_set_rate_exclusive(tcon->dclk, mode->crtc_clock * 1000); |
264 | 266 | ||
265 | /* Set the resolution */ | 267 | /* Set the resolution */ |
266 | regmap_write(tcon->regs, SUN4I_TCON0_BASIC0_REG, | 268 | regmap_write(tcon->regs, SUN4I_TCON0_BASIC0_REG, |
@@ -335,6 +337,9 @@ static void sun4i_tcon0_mode_set_lvds(struct sun4i_tcon *tcon, | |||
335 | regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG, | 337 | regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG, |
336 | SUN4I_TCON_GCTL_IOMAP_MASK, | 338 | SUN4I_TCON_GCTL_IOMAP_MASK, |
337 | SUN4I_TCON_GCTL_IOMAP_TCON0); | 339 | SUN4I_TCON_GCTL_IOMAP_TCON0); |
340 | |||
341 | /* Enable the output on the pins */ | ||
342 | regmap_write(tcon->regs, SUN4I_TCON0_IO_TRI_REG, 0xe0000000); | ||
338 | } | 343 | } |
339 | 344 | ||
340 | static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon, | 345 | static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon, |
@@ -418,7 +423,7 @@ static void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon, | |||
418 | WARN_ON(!tcon->quirks->has_channel_1); | 423 | WARN_ON(!tcon->quirks->has_channel_1); |
419 | 424 | ||
420 | /* Configure the dot clock */ | 425 | /* Configure the dot clock */ |
421 | clk_set_rate(tcon->sclk1, mode->crtc_clock * 1000); | 426 | clk_set_rate_exclusive(tcon->sclk1, mode->crtc_clock * 1000); |
422 | 427 | ||
423 | /* Adjust clock delay */ | 428 | /* Adjust clock delay */ |
424 | clk_delay = sun4i_tcon_get_clk_delay(mode, 1); | 429 | clk_delay = sun4i_tcon_get_clk_delay(mode, 1); |
@@ -870,52 +875,56 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master, | |||
870 | return ret; | 875 | return ret; |
871 | } | 876 | } |
872 | 877 | ||
873 | /* | 878 | if (tcon->quirks->supports_lvds) { |
874 | * This can only be made optional since we've had DT nodes | 879 | /* |
875 | * without the LVDS reset properties. | 880 | * This can only be made optional since we've had DT |
876 | * | 881 | * nodes without the LVDS reset properties. |
877 | * If the property is missing, just disable LVDS, and print a | 882 | * |
878 | * warning. | 883 | * If the property is missing, just disable LVDS, and |
879 | */ | 884 | * print a warning. |
880 | tcon->lvds_rst = devm_reset_control_get_optional(dev, "lvds"); | 885 | */ |
881 | if (IS_ERR(tcon->lvds_rst)) { | 886 | tcon->lvds_rst = devm_reset_control_get_optional(dev, "lvds"); |
882 | dev_err(dev, "Couldn't get our reset line\n"); | 887 | if (IS_ERR(tcon->lvds_rst)) { |
883 | return PTR_ERR(tcon->lvds_rst); | 888 | dev_err(dev, "Couldn't get our reset line\n"); |
884 | } else if (tcon->lvds_rst) { | 889 | return PTR_ERR(tcon->lvds_rst); |
885 | has_lvds_rst = true; | 890 | } else if (tcon->lvds_rst) { |
886 | reset_control_reset(tcon->lvds_rst); | 891 | has_lvds_rst = true; |
887 | } else { | 892 | reset_control_reset(tcon->lvds_rst); |
888 | has_lvds_rst = false; | 893 | } else { |
889 | } | 894 | has_lvds_rst = false; |
895 | } | ||
890 | 896 | ||
891 | /* | 897 | /* |
892 | * This can only be made optional since we've had DT nodes | 898 | * This can only be made optional since we've had DT |
893 | * without the LVDS reset properties. | 899 | * nodes without the LVDS reset properties. |
894 | * | 900 | * |
895 | * If the property is missing, just disable LVDS, and print a | 901 | * If the property is missing, just disable LVDS, and |
896 | * warning. | 902 | * print a warning. |
897 | */ | 903 | */ |
898 | if (tcon->quirks->has_lvds_alt) { | 904 | if (tcon->quirks->has_lvds_alt) { |
899 | tcon->lvds_pll = devm_clk_get(dev, "lvds-alt"); | 905 | tcon->lvds_pll = devm_clk_get(dev, "lvds-alt"); |
900 | if (IS_ERR(tcon->lvds_pll)) { | 906 | if (IS_ERR(tcon->lvds_pll)) { |
901 | if (PTR_ERR(tcon->lvds_pll) == -ENOENT) { | 907 | if (PTR_ERR(tcon->lvds_pll) == -ENOENT) { |
902 | has_lvds_alt = false; | 908 | has_lvds_alt = false; |
909 | } else { | ||
910 | dev_err(dev, "Couldn't get the LVDS PLL\n"); | ||
911 | return PTR_ERR(tcon->lvds_pll); | ||
912 | } | ||
903 | } else { | 913 | } else { |
904 | dev_err(dev, "Couldn't get the LVDS PLL\n"); | 914 | has_lvds_alt = true; |
905 | return PTR_ERR(tcon->lvds_pll); | ||
906 | } | 915 | } |
907 | } else { | ||
908 | has_lvds_alt = true; | ||
909 | } | 916 | } |
910 | } | ||
911 | 917 | ||
912 | if (!has_lvds_rst || (tcon->quirks->has_lvds_alt && !has_lvds_alt)) { | 918 | if (!has_lvds_rst || |
913 | dev_warn(dev, | 919 | (tcon->quirks->has_lvds_alt && !has_lvds_alt)) { |
914 | "Missing LVDS properties, Please upgrade your DT\n"); | 920 | dev_warn(dev, "Missing LVDS properties, Please upgrade your DT\n"); |
915 | dev_warn(dev, "LVDS output disabled\n"); | 921 | dev_warn(dev, "LVDS output disabled\n"); |
916 | can_lvds = false; | 922 | can_lvds = false; |
923 | } else { | ||
924 | can_lvds = true; | ||
925 | } | ||
917 | } else { | 926 | } else { |
918 | can_lvds = true; | 927 | can_lvds = false; |
919 | } | 928 | } |
920 | 929 | ||
921 | ret = sun4i_tcon_init_clocks(dev, tcon); | 930 | ret = sun4i_tcon_init_clocks(dev, tcon); |
@@ -1134,7 +1143,7 @@ static const struct sun4i_tcon_quirks sun8i_a33_quirks = { | |||
1134 | }; | 1143 | }; |
1135 | 1144 | ||
1136 | static const struct sun4i_tcon_quirks sun8i_a83t_lcd_quirks = { | 1145 | static const struct sun4i_tcon_quirks sun8i_a83t_lcd_quirks = { |
1137 | /* nothing is supported */ | 1146 | .supports_lvds = true, |
1138 | }; | 1147 | }; |
1139 | 1148 | ||
1140 | static const struct sun4i_tcon_quirks sun8i_v3s_quirks = { | 1149 | static const struct sun4i_tcon_quirks sun8i_v3s_quirks = { |
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h index b761c7b823c5..278700c7bf9f 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.h +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h | |||
@@ -175,6 +175,7 @@ struct sun4i_tcon_quirks { | |||
175 | bool has_channel_1; /* a33 does not have channel 1 */ | 175 | bool has_channel_1; /* a33 does not have channel 1 */ |
176 | bool has_lvds_alt; /* Does the LVDS clock have a parent other than the TCON clock? */ | 176 | bool has_lvds_alt; /* Does the LVDS clock have a parent other than the TCON clock? */ |
177 | bool needs_de_be_mux; /* sun6i needs mux to select backend */ | 177 | bool needs_de_be_mux; /* sun6i needs mux to select backend */ |
178 | bool supports_lvds; /* Does the TCON support an LVDS output? */ | ||
178 | 179 | ||
179 | /* callback to handle tcon muxing options */ | 180 | /* callback to handle tcon muxing options */ |
180 | int (*set_mux)(struct sun4i_tcon *, const struct drm_encoder *); | 181 | int (*set_mux)(struct sun4i_tcon *, const struct drm_encoder *); |
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index 5720a0d4ac0a..677ac16c8a6d 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c | |||
@@ -197,6 +197,9 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data, | |||
197 | case VIRTGPU_PARAM_3D_FEATURES: | 197 | case VIRTGPU_PARAM_3D_FEATURES: |
198 | value = vgdev->has_virgl_3d == true ? 1 : 0; | 198 | value = vgdev->has_virgl_3d == true ? 1 : 0; |
199 | break; | 199 | break; |
200 | case VIRTGPU_PARAM_CAPSET_QUERY_FIX: | ||
201 | value = 1; | ||
202 | break; | ||
200 | default: | 203 | default: |
201 | return -EINVAL; | 204 | return -EINVAL; |
202 | } | 205 | } |
@@ -472,7 +475,7 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev, | |||
472 | { | 475 | { |
473 | struct virtio_gpu_device *vgdev = dev->dev_private; | 476 | struct virtio_gpu_device *vgdev = dev->dev_private; |
474 | struct drm_virtgpu_get_caps *args = data; | 477 | struct drm_virtgpu_get_caps *args = data; |
475 | int size; | 478 | unsigned size, host_caps_size; |
476 | int i; | 479 | int i; |
477 | int found_valid = -1; | 480 | int found_valid = -1; |
478 | int ret; | 481 | int ret; |
@@ -481,6 +484,10 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev, | |||
481 | if (vgdev->num_capsets == 0) | 484 | if (vgdev->num_capsets == 0) |
482 | return -ENOSYS; | 485 | return -ENOSYS; |
483 | 486 | ||
487 | /* don't allow userspace to pass 0 */ | ||
488 | if (args->size == 0) | ||
489 | return -EINVAL; | ||
490 | |||
484 | spin_lock(&vgdev->display_info_lock); | 491 | spin_lock(&vgdev->display_info_lock); |
485 | for (i = 0; i < vgdev->num_capsets; i++) { | 492 | for (i = 0; i < vgdev->num_capsets; i++) { |
486 | if (vgdev->capsets[i].id == args->cap_set_id) { | 493 | if (vgdev->capsets[i].id == args->cap_set_id) { |
@@ -496,11 +503,9 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev, | |||
496 | return -EINVAL; | 503 | return -EINVAL; |
497 | } | 504 | } |
498 | 505 | ||
499 | size = vgdev->capsets[found_valid].max_size; | 506 | host_caps_size = vgdev->capsets[found_valid].max_size; |
500 | if (args->size > size) { | 507 | /* only copy to user the minimum of the host caps size or the guest caps size */ |
501 | spin_unlock(&vgdev->display_info_lock); | 508 | size = min(args->size, host_caps_size); |
502 | return -EINVAL; | ||
503 | } | ||
504 | 509 | ||
505 | list_for_each_entry(cache_ent, &vgdev->cap_cache, head) { | 510 | list_for_each_entry(cache_ent, &vgdev->cap_cache, head) { |
506 | if (cache_ent->id == args->cap_set_id && | 511 | if (cache_ent->id == args->cap_set_id && |
diff --git a/drivers/i2c/busses/i2c-octeon-core.c b/drivers/i2c/busses/i2c-octeon-core.c index 1d8775799056..d9607905dc2f 100644 --- a/drivers/i2c/busses/i2c-octeon-core.c +++ b/drivers/i2c/busses/i2c-octeon-core.c | |||
@@ -233,6 +233,7 @@ static int octeon_i2c_check_status(struct octeon_i2c *i2c, int final_read) | |||
233 | return -EOPNOTSUPP; | 233 | return -EOPNOTSUPP; |
234 | 234 | ||
235 | case STAT_TXDATA_NAK: | 235 | case STAT_TXDATA_NAK: |
236 | case STAT_BUS_ERROR: | ||
236 | return -EIO; | 237 | return -EIO; |
237 | case STAT_TXADDR_NAK: | 238 | case STAT_TXADDR_NAK: |
238 | case STAT_RXADDR_NAK: | 239 | case STAT_RXADDR_NAK: |
diff --git a/drivers/i2c/busses/i2c-octeon-core.h b/drivers/i2c/busses/i2c-octeon-core.h index a7ef19855bb8..9bb9f64fdda0 100644 --- a/drivers/i2c/busses/i2c-octeon-core.h +++ b/drivers/i2c/busses/i2c-octeon-core.h | |||
@@ -43,7 +43,7 @@ | |||
43 | #define TWSI_CTL_AAK 0x04 /* Assert ACK */ | 43 | #define TWSI_CTL_AAK 0x04 /* Assert ACK */ |
44 | 44 | ||
45 | /* Status values */ | 45 | /* Status values */ |
46 | #define STAT_ERROR 0x00 | 46 | #define STAT_BUS_ERROR 0x00 |
47 | #define STAT_START 0x08 | 47 | #define STAT_START 0x08 |
48 | #define STAT_REP_START 0x10 | 48 | #define STAT_REP_START 0x10 |
49 | #define STAT_TXADDR_ACK 0x18 | 49 | #define STAT_TXADDR_ACK 0x18 |
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 17fd55af4d92..caa20eb5f26b 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c | |||
@@ -928,7 +928,7 @@ static int exact_lock(dev_t dev, void *data) | |||
928 | { | 928 | { |
929 | struct gendisk *p = data; | 929 | struct gendisk *p = data; |
930 | 930 | ||
931 | if (!get_disk(p)) | 931 | if (!get_disk_and_module(p)) |
932 | return -1; | 932 | return -1; |
933 | return 0; | 933 | return 0; |
934 | } | 934 | } |
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index a5b4cf030c11..9183d148d644 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c | |||
@@ -550,18 +550,13 @@ static int addr_resolve(struct sockaddr *src_in, | |||
550 | dst_release(dst); | 550 | dst_release(dst); |
551 | } | 551 | } |
552 | 552 | ||
553 | if (ndev->flags & IFF_LOOPBACK) { | 553 | if (ndev) { |
554 | ret = rdma_translate_ip(dst_in, addr); | 554 | if (ndev->flags & IFF_LOOPBACK) |
555 | /* | 555 | ret = rdma_translate_ip(dst_in, addr); |
556 | * Put the loopback device and get the translated | 556 | else |
557 | * device instead. | 557 | addr->bound_dev_if = ndev->ifindex; |
558 | */ | ||
559 | dev_put(ndev); | 558 | dev_put(ndev); |
560 | ndev = dev_get_by_index(addr->net, addr->bound_dev_if); | ||
561 | } else { | ||
562 | addr->bound_dev_if = ndev->ifindex; | ||
563 | } | 559 | } |
564 | dev_put(ndev); | ||
565 | 560 | ||
566 | return ret; | 561 | return ret; |
567 | } | 562 | } |
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c index bc79ca8215d7..af5ad6a56ae4 100644 --- a/drivers/infiniband/core/cq.c +++ b/drivers/infiniband/core/cq.c | |||
@@ -17,6 +17,7 @@ | |||
17 | 17 | ||
18 | /* # of WCs to poll for with a single call to ib_poll_cq */ | 18 | /* # of WCs to poll for with a single call to ib_poll_cq */ |
19 | #define IB_POLL_BATCH 16 | 19 | #define IB_POLL_BATCH 16 |
20 | #define IB_POLL_BATCH_DIRECT 8 | ||
20 | 21 | ||
21 | /* # of WCs to iterate over before yielding */ | 22 | /* # of WCs to iterate over before yielding */ |
22 | #define IB_POLL_BUDGET_IRQ 256 | 23 | #define IB_POLL_BUDGET_IRQ 256 |
@@ -25,18 +26,18 @@ | |||
25 | #define IB_POLL_FLAGS \ | 26 | #define IB_POLL_FLAGS \ |
26 | (IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS) | 27 | (IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS) |
27 | 28 | ||
28 | static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc) | 29 | static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs, |
30 | int batch) | ||
29 | { | 31 | { |
30 | int i, n, completed = 0; | 32 | int i, n, completed = 0; |
31 | struct ib_wc *wcs = poll_wc ? : cq->wc; | ||
32 | 33 | ||
33 | /* | 34 | /* |
34 | * budget might be (-1) if the caller does not | 35 | * budget might be (-1) if the caller does not |
35 | * want to bound this call, thus we need unsigned | 36 | * want to bound this call, thus we need unsigned |
36 | * minimum here. | 37 | * minimum here. |
37 | */ | 38 | */ |
38 | while ((n = ib_poll_cq(cq, min_t(u32, IB_POLL_BATCH, | 39 | while ((n = ib_poll_cq(cq, min_t(u32, batch, |
39 | budget - completed), wcs)) > 0) { | 40 | budget - completed), wcs)) > 0) { |
40 | for (i = 0; i < n; i++) { | 41 | for (i = 0; i < n; i++) { |
41 | struct ib_wc *wc = &wcs[i]; | 42 | struct ib_wc *wc = &wcs[i]; |
42 | 43 | ||
@@ -48,8 +49,7 @@ static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc) | |||
48 | 49 | ||
49 | completed += n; | 50 | completed += n; |
50 | 51 | ||
51 | if (n != IB_POLL_BATCH || | 52 | if (n != batch || (budget != -1 && completed >= budget)) |
52 | (budget != -1 && completed >= budget)) | ||
53 | break; | 53 | break; |
54 | } | 54 | } |
55 | 55 | ||
@@ -72,9 +72,9 @@ static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc) | |||
72 | */ | 72 | */ |
73 | int ib_process_cq_direct(struct ib_cq *cq, int budget) | 73 | int ib_process_cq_direct(struct ib_cq *cq, int budget) |
74 | { | 74 | { |
75 | struct ib_wc wcs[IB_POLL_BATCH]; | 75 | struct ib_wc wcs[IB_POLL_BATCH_DIRECT]; |
76 | 76 | ||
77 | return __ib_process_cq(cq, budget, wcs); | 77 | return __ib_process_cq(cq, budget, wcs, IB_POLL_BATCH_DIRECT); |
78 | } | 78 | } |
79 | EXPORT_SYMBOL(ib_process_cq_direct); | 79 | EXPORT_SYMBOL(ib_process_cq_direct); |
80 | 80 | ||
@@ -88,7 +88,7 @@ static int ib_poll_handler(struct irq_poll *iop, int budget) | |||
88 | struct ib_cq *cq = container_of(iop, struct ib_cq, iop); | 88 | struct ib_cq *cq = container_of(iop, struct ib_cq, iop); |
89 | int completed; | 89 | int completed; |
90 | 90 | ||
91 | completed = __ib_process_cq(cq, budget, NULL); | 91 | completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH); |
92 | if (completed < budget) { | 92 | if (completed < budget) { |
93 | irq_poll_complete(&cq->iop); | 93 | irq_poll_complete(&cq->iop); |
94 | if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) | 94 | if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) |
@@ -108,7 +108,8 @@ static void ib_cq_poll_work(struct work_struct *work) | |||
108 | struct ib_cq *cq = container_of(work, struct ib_cq, work); | 108 | struct ib_cq *cq = container_of(work, struct ib_cq, work); |
109 | int completed; | 109 | int completed; |
110 | 110 | ||
111 | completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, NULL); | 111 | completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc, |
112 | IB_POLL_BATCH); | ||
112 | if (completed >= IB_POLL_BUDGET_WORKQUEUE || | 113 | if (completed >= IB_POLL_BUDGET_WORKQUEUE || |
113 | ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) | 114 | ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) |
114 | queue_work(ib_comp_wq, &cq->work); | 115 | queue_work(ib_comp_wq, &cq->work); |
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index e8010e73a1cf..bb065c9449be 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c | |||
@@ -536,14 +536,14 @@ int ib_register_device(struct ib_device *device, | |||
536 | ret = device->query_device(device, &device->attrs, &uhw); | 536 | ret = device->query_device(device, &device->attrs, &uhw); |
537 | if (ret) { | 537 | if (ret) { |
538 | pr_warn("Couldn't query the device attributes\n"); | 538 | pr_warn("Couldn't query the device attributes\n"); |
539 | goto cache_cleanup; | 539 | goto cg_cleanup; |
540 | } | 540 | } |
541 | 541 | ||
542 | ret = ib_device_register_sysfs(device, port_callback); | 542 | ret = ib_device_register_sysfs(device, port_callback); |
543 | if (ret) { | 543 | if (ret) { |
544 | pr_warn("Couldn't register device %s with driver model\n", | 544 | pr_warn("Couldn't register device %s with driver model\n", |
545 | device->name); | 545 | device->name); |
546 | goto cache_cleanup; | 546 | goto cg_cleanup; |
547 | } | 547 | } |
548 | 548 | ||
549 | device->reg_state = IB_DEV_REGISTERED; | 549 | device->reg_state = IB_DEV_REGISTERED; |
@@ -559,6 +559,8 @@ int ib_register_device(struct ib_device *device, | |||
559 | mutex_unlock(&device_mutex); | 559 | mutex_unlock(&device_mutex); |
560 | return 0; | 560 | return 0; |
561 | 561 | ||
562 | cg_cleanup: | ||
563 | ib_device_unregister_rdmacg(device); | ||
562 | cache_cleanup: | 564 | cache_cleanup: |
563 | ib_cache_cleanup_one(device); | 565 | ib_cache_cleanup_one(device); |
564 | ib_cache_release_one(device); | 566 | ib_cache_release_one(device); |
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index 8cf15d4a8ac4..9f029a1ca5ea 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c | |||
@@ -1291,10 +1291,9 @@ int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num, | |||
1291 | 1291 | ||
1292 | resolved_dev = dev_get_by_index(dev_addr.net, | 1292 | resolved_dev = dev_get_by_index(dev_addr.net, |
1293 | dev_addr.bound_dev_if); | 1293 | dev_addr.bound_dev_if); |
1294 | if (resolved_dev->flags & IFF_LOOPBACK) { | 1294 | if (!resolved_dev) { |
1295 | dev_put(resolved_dev); | 1295 | dev_put(idev); |
1296 | resolved_dev = idev; | 1296 | return -ENODEV; |
1297 | dev_hold(resolved_dev); | ||
1298 | } | 1297 | } |
1299 | ndev = ib_get_ndev_from_path(rec); | 1298 | ndev = ib_get_ndev_from_path(rec); |
1300 | rcu_read_lock(); | 1299 | rcu_read_lock(); |
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index f015f1bf88c9..3a9d0f5b5881 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c | |||
@@ -1149,6 +1149,9 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file, | |||
1149 | if (copy_from_user(&cmd, inbuf, sizeof(cmd))) | 1149 | if (copy_from_user(&cmd, inbuf, sizeof(cmd))) |
1150 | return -EFAULT; | 1150 | return -EFAULT; |
1151 | 1151 | ||
1152 | if (cmd.qp_state > IB_QPS_ERR) | ||
1153 | return -EINVAL; | ||
1154 | |||
1152 | ctx = ucma_get_ctx(file, cmd.id); | 1155 | ctx = ucma_get_ctx(file, cmd.id); |
1153 | if (IS_ERR(ctx)) | 1156 | if (IS_ERR(ctx)) |
1154 | return PTR_ERR(ctx); | 1157 | return PTR_ERR(ctx); |
@@ -1294,6 +1297,9 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf, | |||
1294 | if (IS_ERR(ctx)) | 1297 | if (IS_ERR(ctx)) |
1295 | return PTR_ERR(ctx); | 1298 | return PTR_ERR(ctx); |
1296 | 1299 | ||
1300 | if (unlikely(cmd.optval > KMALLOC_MAX_SIZE)) | ||
1301 | return -EINVAL; | ||
1302 | |||
1297 | optval = memdup_user((void __user *) (unsigned long) cmd.optval, | 1303 | optval = memdup_user((void __user *) (unsigned long) cmd.optval, |
1298 | cmd.optlen); | 1304 | cmd.optlen); |
1299 | if (IS_ERR(optval)) { | 1305 | if (IS_ERR(optval)) { |
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 643174d949a8..0dd75f449872 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c | |||
@@ -785,7 +785,7 @@ int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr) | |||
785 | return 0; | 785 | return 0; |
786 | } | 786 | } |
787 | 787 | ||
788 | static unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp) | 788 | unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp) |
789 | __acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock) | 789 | __acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock) |
790 | { | 790 | { |
791 | unsigned long flags; | 791 | unsigned long flags; |
@@ -799,8 +799,8 @@ static unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp) | |||
799 | return flags; | 799 | return flags; |
800 | } | 800 | } |
801 | 801 | ||
802 | static void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, | 802 | void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, |
803 | unsigned long flags) | 803 | unsigned long flags) |
804 | __releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock) | 804 | __releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock) |
805 | { | 805 | { |
806 | if (qp->rcq != qp->scq) | 806 | if (qp->rcq != qp->scq) |
@@ -1606,6 +1606,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, | |||
1606 | int status; | 1606 | int status; |
1607 | union ib_gid sgid; | 1607 | union ib_gid sgid; |
1608 | struct ib_gid_attr sgid_attr; | 1608 | struct ib_gid_attr sgid_attr; |
1609 | unsigned int flags; | ||
1609 | u8 nw_type; | 1610 | u8 nw_type; |
1610 | 1611 | ||
1611 | qp->qplib_qp.modify_flags = 0; | 1612 | qp->qplib_qp.modify_flags = 0; |
@@ -1634,14 +1635,18 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, | |||
1634 | dev_dbg(rdev_to_dev(rdev), | 1635 | dev_dbg(rdev_to_dev(rdev), |
1635 | "Move QP = %p to flush list\n", | 1636 | "Move QP = %p to flush list\n", |
1636 | qp); | 1637 | qp); |
1638 | flags = bnxt_re_lock_cqs(qp); | ||
1637 | bnxt_qplib_add_flush_qp(&qp->qplib_qp); | 1639 | bnxt_qplib_add_flush_qp(&qp->qplib_qp); |
1640 | bnxt_re_unlock_cqs(qp, flags); | ||
1638 | } | 1641 | } |
1639 | if (!qp->sumem && | 1642 | if (!qp->sumem && |
1640 | qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) { | 1643 | qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) { |
1641 | dev_dbg(rdev_to_dev(rdev), | 1644 | dev_dbg(rdev_to_dev(rdev), |
1642 | "Move QP = %p out of flush list\n", | 1645 | "Move QP = %p out of flush list\n", |
1643 | qp); | 1646 | qp); |
1647 | flags = bnxt_re_lock_cqs(qp); | ||
1644 | bnxt_qplib_clean_qp(&qp->qplib_qp); | 1648 | bnxt_qplib_clean_qp(&qp->qplib_qp); |
1649 | bnxt_re_unlock_cqs(qp, flags); | ||
1645 | } | 1650 | } |
1646 | } | 1651 | } |
1647 | if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) { | 1652 | if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) { |
@@ -2227,10 +2232,13 @@ static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr, | |||
2227 | wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV; | 2232 | wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV; |
2228 | wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey; | 2233 | wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey; |
2229 | 2234 | ||
2235 | /* Need unconditional fence for local invalidate | ||
2236 | * opcode to work as expected. | ||
2237 | */ | ||
2238 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; | ||
2239 | |||
2230 | if (wr->send_flags & IB_SEND_SIGNALED) | 2240 | if (wr->send_flags & IB_SEND_SIGNALED) |
2231 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; | 2241 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; |
2232 | if (wr->send_flags & IB_SEND_FENCE) | ||
2233 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; | ||
2234 | if (wr->send_flags & IB_SEND_SOLICITED) | 2242 | if (wr->send_flags & IB_SEND_SOLICITED) |
2235 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT; | 2243 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT; |
2236 | 2244 | ||
@@ -2251,8 +2259,12 @@ static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr, | |||
2251 | wqe->frmr.levels = qplib_frpl->hwq.level + 1; | 2259 | wqe->frmr.levels = qplib_frpl->hwq.level + 1; |
2252 | wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR; | 2260 | wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR; |
2253 | 2261 | ||
2254 | if (wr->wr.send_flags & IB_SEND_FENCE) | 2262 | /* Need unconditional fence for reg_mr |
2255 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; | 2263 | * opcode to function as expected. |
2264 | */ | ||
2265 | |||
2266 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; | ||
2267 | |||
2256 | if (wr->wr.send_flags & IB_SEND_SIGNALED) | 2268 | if (wr->wr.send_flags & IB_SEND_SIGNALED) |
2257 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; | 2269 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; |
2258 | 2270 | ||
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h index b88a48d43a9d..e62b7c2c7da6 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h | |||
@@ -222,4 +222,7 @@ struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev, | |||
222 | struct ib_udata *udata); | 222 | struct ib_udata *udata); |
223 | int bnxt_re_dealloc_ucontext(struct ib_ucontext *context); | 223 | int bnxt_re_dealloc_ucontext(struct ib_ucontext *context); |
224 | int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); | 224 | int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); |
225 | |||
226 | unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp); | ||
227 | void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags); | ||
225 | #endif /* __BNXT_RE_IB_VERBS_H__ */ | 228 | #endif /* __BNXT_RE_IB_VERBS_H__ */ |
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 33a448036c2e..f6e361750466 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c | |||
@@ -730,6 +730,13 @@ static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event, | |||
730 | struct bnxt_re_qp *qp) | 730 | struct bnxt_re_qp *qp) |
731 | { | 731 | { |
732 | struct ib_event event; | 732 | struct ib_event event; |
733 | unsigned int flags; | ||
734 | |||
735 | if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) { | ||
736 | flags = bnxt_re_lock_cqs(qp); | ||
737 | bnxt_qplib_add_flush_qp(&qp->qplib_qp); | ||
738 | bnxt_re_unlock_cqs(qp, flags); | ||
739 | } | ||
733 | 740 | ||
734 | memset(&event, 0, sizeof(event)); | 741 | memset(&event, 0, sizeof(event)); |
735 | if (qp->qplib_qp.srq) { | 742 | if (qp->qplib_qp.srq) { |
@@ -1416,9 +1423,12 @@ static void bnxt_re_task(struct work_struct *work) | |||
1416 | switch (re_work->event) { | 1423 | switch (re_work->event) { |
1417 | case NETDEV_REGISTER: | 1424 | case NETDEV_REGISTER: |
1418 | rc = bnxt_re_ib_reg(rdev); | 1425 | rc = bnxt_re_ib_reg(rdev); |
1419 | if (rc) | 1426 | if (rc) { |
1420 | dev_err(rdev_to_dev(rdev), | 1427 | dev_err(rdev_to_dev(rdev), |
1421 | "Failed to register with IB: %#x", rc); | 1428 | "Failed to register with IB: %#x", rc); |
1429 | bnxt_re_remove_one(rdev); | ||
1430 | bnxt_re_dev_unreg(rdev); | ||
1431 | } | ||
1422 | break; | 1432 | break; |
1423 | case NETDEV_UP: | 1433 | case NETDEV_UP: |
1424 | bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, | 1434 | bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, |
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index 3ea5b9624f6b..06b42c880fd4 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c | |||
@@ -88,75 +88,35 @@ static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp) | |||
88 | } | 88 | } |
89 | } | 89 | } |
90 | 90 | ||
91 | void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp, | 91 | static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp, |
92 | unsigned long *flags) | 92 | unsigned long *flags) |
93 | __acquires(&qp->scq->hwq.lock) __acquires(&qp->rcq->hwq.lock) | 93 | __acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock) |
94 | { | 94 | { |
95 | spin_lock_irqsave(&qp->scq->hwq.lock, *flags); | 95 | spin_lock_irqsave(&qp->scq->flush_lock, *flags); |
96 | if (qp->scq == qp->rcq) | 96 | if (qp->scq == qp->rcq) |
97 | __acquire(&qp->rcq->hwq.lock); | 97 | __acquire(&qp->rcq->flush_lock); |
98 | else | 98 | else |
99 | spin_lock(&qp->rcq->hwq.lock); | 99 | spin_lock(&qp->rcq->flush_lock); |
100 | } | 100 | } |
101 | 101 | ||
102 | void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp, | 102 | static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp, |
103 | unsigned long *flags) | 103 | unsigned long *flags) |
104 | __releases(&qp->scq->hwq.lock) __releases(&qp->rcq->hwq.lock) | 104 | __releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock) |
105 | { | 105 | { |
106 | if (qp->scq == qp->rcq) | 106 | if (qp->scq == qp->rcq) |
107 | __release(&qp->rcq->hwq.lock); | 107 | __release(&qp->rcq->flush_lock); |
108 | else | 108 | else |
109 | spin_unlock(&qp->rcq->hwq.lock); | 109 | spin_unlock(&qp->rcq->flush_lock); |
110 | spin_unlock_irqrestore(&qp->scq->hwq.lock, *flags); | 110 | spin_unlock_irqrestore(&qp->scq->flush_lock, *flags); |
111 | } | ||
112 | |||
113 | static struct bnxt_qplib_cq *bnxt_qplib_find_buddy_cq(struct bnxt_qplib_qp *qp, | ||
114 | struct bnxt_qplib_cq *cq) | ||
115 | { | ||
116 | struct bnxt_qplib_cq *buddy_cq = NULL; | ||
117 | |||
118 | if (qp->scq == qp->rcq) | ||
119 | buddy_cq = NULL; | ||
120 | else if (qp->scq == cq) | ||
121 | buddy_cq = qp->rcq; | ||
122 | else | ||
123 | buddy_cq = qp->scq; | ||
124 | return buddy_cq; | ||
125 | } | ||
126 | |||
127 | static void bnxt_qplib_lock_buddy_cq(struct bnxt_qplib_qp *qp, | ||
128 | struct bnxt_qplib_cq *cq) | ||
129 | __acquires(&buddy_cq->hwq.lock) | ||
130 | { | ||
131 | struct bnxt_qplib_cq *buddy_cq = NULL; | ||
132 | |||
133 | buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq); | ||
134 | if (!buddy_cq) | ||
135 | __acquire(&cq->hwq.lock); | ||
136 | else | ||
137 | spin_lock(&buddy_cq->hwq.lock); | ||
138 | } | ||
139 | |||
140 | static void bnxt_qplib_unlock_buddy_cq(struct bnxt_qplib_qp *qp, | ||
141 | struct bnxt_qplib_cq *cq) | ||
142 | __releases(&buddy_cq->hwq.lock) | ||
143 | { | ||
144 | struct bnxt_qplib_cq *buddy_cq = NULL; | ||
145 | |||
146 | buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq); | ||
147 | if (!buddy_cq) | ||
148 | __release(&cq->hwq.lock); | ||
149 | else | ||
150 | spin_unlock(&buddy_cq->hwq.lock); | ||
151 | } | 111 | } |
152 | 112 | ||
153 | void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp) | 113 | void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp) |
154 | { | 114 | { |
155 | unsigned long flags; | 115 | unsigned long flags; |
156 | 116 | ||
157 | bnxt_qplib_acquire_cq_locks(qp, &flags); | 117 | bnxt_qplib_acquire_cq_flush_locks(qp, &flags); |
158 | __bnxt_qplib_add_flush_qp(qp); | 118 | __bnxt_qplib_add_flush_qp(qp); |
159 | bnxt_qplib_release_cq_locks(qp, &flags); | 119 | bnxt_qplib_release_cq_flush_locks(qp, &flags); |
160 | } | 120 | } |
161 | 121 | ||
162 | static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp) | 122 | static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp) |
@@ -177,7 +137,7 @@ void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp) | |||
177 | { | 137 | { |
178 | unsigned long flags; | 138 | unsigned long flags; |
179 | 139 | ||
180 | bnxt_qplib_acquire_cq_locks(qp, &flags); | 140 | bnxt_qplib_acquire_cq_flush_locks(qp, &flags); |
181 | __clean_cq(qp->scq, (u64)(unsigned long)qp); | 141 | __clean_cq(qp->scq, (u64)(unsigned long)qp); |
182 | qp->sq.hwq.prod = 0; | 142 | qp->sq.hwq.prod = 0; |
183 | qp->sq.hwq.cons = 0; | 143 | qp->sq.hwq.cons = 0; |
@@ -186,7 +146,7 @@ void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp) | |||
186 | qp->rq.hwq.cons = 0; | 146 | qp->rq.hwq.cons = 0; |
187 | 147 | ||
188 | __bnxt_qplib_del_flush_qp(qp); | 148 | __bnxt_qplib_del_flush_qp(qp); |
189 | bnxt_qplib_release_cq_locks(qp, &flags); | 149 | bnxt_qplib_release_cq_flush_locks(qp, &flags); |
190 | } | 150 | } |
191 | 151 | ||
192 | static void bnxt_qpn_cqn_sched_task(struct work_struct *work) | 152 | static void bnxt_qpn_cqn_sched_task(struct work_struct *work) |
@@ -2107,9 +2067,6 @@ void bnxt_qplib_mark_qp_error(void *qp_handle) | |||
2107 | /* Must block new posting of SQ and RQ */ | 2067 | /* Must block new posting of SQ and RQ */ |
2108 | qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; | 2068 | qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; |
2109 | bnxt_qplib_cancel_phantom_processing(qp); | 2069 | bnxt_qplib_cancel_phantom_processing(qp); |
2110 | |||
2111 | /* Add qp to flush list of the CQ */ | ||
2112 | __bnxt_qplib_add_flush_qp(qp); | ||
2113 | } | 2070 | } |
2114 | 2071 | ||
2115 | /* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive) | 2072 | /* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive) |
@@ -2285,9 +2242,9 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, | |||
2285 | sw_sq_cons, cqe->wr_id, cqe->status); | 2242 | sw_sq_cons, cqe->wr_id, cqe->status); |
2286 | cqe++; | 2243 | cqe++; |
2287 | (*budget)--; | 2244 | (*budget)--; |
2288 | bnxt_qplib_lock_buddy_cq(qp, cq); | ||
2289 | bnxt_qplib_mark_qp_error(qp); | 2245 | bnxt_qplib_mark_qp_error(qp); |
2290 | bnxt_qplib_unlock_buddy_cq(qp, cq); | 2246 | /* Add qp to flush list of the CQ */ |
2247 | bnxt_qplib_add_flush_qp(qp); | ||
2291 | } else { | 2248 | } else { |
2292 | if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) { | 2249 | if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) { |
2293 | /* Before we complete, do WA 9060 */ | 2250 | /* Before we complete, do WA 9060 */ |
@@ -2403,9 +2360,7 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq, | |||
2403 | if (hwcqe->status != CQ_RES_RC_STATUS_OK) { | 2360 | if (hwcqe->status != CQ_RES_RC_STATUS_OK) { |
2404 | qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; | 2361 | qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; |
2405 | /* Add qp to flush list of the CQ */ | 2362 | /* Add qp to flush list of the CQ */ |
2406 | bnxt_qplib_lock_buddy_cq(qp, cq); | 2363 | bnxt_qplib_add_flush_qp(qp); |
2407 | __bnxt_qplib_add_flush_qp(qp); | ||
2408 | bnxt_qplib_unlock_buddy_cq(qp, cq); | ||
2409 | } | 2364 | } |
2410 | } | 2365 | } |
2411 | 2366 | ||
@@ -2489,9 +2444,7 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq, | |||
2489 | if (hwcqe->status != CQ_RES_RC_STATUS_OK) { | 2444 | if (hwcqe->status != CQ_RES_RC_STATUS_OK) { |
2490 | qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; | 2445 | qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; |
2491 | /* Add qp to flush list of the CQ */ | 2446 | /* Add qp to flush list of the CQ */ |
2492 | bnxt_qplib_lock_buddy_cq(qp, cq); | 2447 | bnxt_qplib_add_flush_qp(qp); |
2493 | __bnxt_qplib_add_flush_qp(qp); | ||
2494 | bnxt_qplib_unlock_buddy_cq(qp, cq); | ||
2495 | } | 2448 | } |
2496 | } | 2449 | } |
2497 | done: | 2450 | done: |
@@ -2501,11 +2454,9 @@ done: | |||
2501 | bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq) | 2454 | bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq) |
2502 | { | 2455 | { |
2503 | struct cq_base *hw_cqe, **hw_cqe_ptr; | 2456 | struct cq_base *hw_cqe, **hw_cqe_ptr; |
2504 | unsigned long flags; | ||
2505 | u32 sw_cons, raw_cons; | 2457 | u32 sw_cons, raw_cons; |
2506 | bool rc = true; | 2458 | bool rc = true; |
2507 | 2459 | ||
2508 | spin_lock_irqsave(&cq->hwq.lock, flags); | ||
2509 | raw_cons = cq->hwq.cons; | 2460 | raw_cons = cq->hwq.cons; |
2510 | sw_cons = HWQ_CMP(raw_cons, &cq->hwq); | 2461 | sw_cons = HWQ_CMP(raw_cons, &cq->hwq); |
2511 | hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr; | 2462 | hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr; |
@@ -2513,7 +2464,6 @@ bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq) | |||
2513 | 2464 | ||
2514 | /* Check for Valid bit. If the CQE is valid, return false */ | 2465 | /* Check for Valid bit. If the CQE is valid, return false */ |
2515 | rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements); | 2466 | rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements); |
2516 | spin_unlock_irqrestore(&cq->hwq.lock, flags); | ||
2517 | return rc; | 2467 | return rc; |
2518 | } | 2468 | } |
2519 | 2469 | ||
@@ -2602,9 +2552,7 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq, | |||
2602 | if (hwcqe->status != CQ_RES_RC_STATUS_OK) { | 2552 | if (hwcqe->status != CQ_RES_RC_STATUS_OK) { |
2603 | qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; | 2553 | qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; |
2604 | /* Add qp to flush list of the CQ */ | 2554 | /* Add qp to flush list of the CQ */ |
2605 | bnxt_qplib_lock_buddy_cq(qp, cq); | 2555 | bnxt_qplib_add_flush_qp(qp); |
2606 | __bnxt_qplib_add_flush_qp(qp); | ||
2607 | bnxt_qplib_unlock_buddy_cq(qp, cq); | ||
2608 | } | 2556 | } |
2609 | } | 2557 | } |
2610 | 2558 | ||
@@ -2719,9 +2667,7 @@ do_rq: | |||
2719 | */ | 2667 | */ |
2720 | 2668 | ||
2721 | /* Add qp to flush list of the CQ */ | 2669 | /* Add qp to flush list of the CQ */ |
2722 | bnxt_qplib_lock_buddy_cq(qp, cq); | 2670 | bnxt_qplib_add_flush_qp(qp); |
2723 | __bnxt_qplib_add_flush_qp(qp); | ||
2724 | bnxt_qplib_unlock_buddy_cq(qp, cq); | ||
2725 | done: | 2671 | done: |
2726 | return rc; | 2672 | return rc; |
2727 | } | 2673 | } |
@@ -2750,7 +2696,7 @@ int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq, | |||
2750 | u32 budget = num_cqes; | 2696 | u32 budget = num_cqes; |
2751 | unsigned long flags; | 2697 | unsigned long flags; |
2752 | 2698 | ||
2753 | spin_lock_irqsave(&cq->hwq.lock, flags); | 2699 | spin_lock_irqsave(&cq->flush_lock, flags); |
2754 | list_for_each_entry(qp, &cq->sqf_head, sq_flush) { | 2700 | list_for_each_entry(qp, &cq->sqf_head, sq_flush) { |
2755 | dev_dbg(&cq->hwq.pdev->dev, | 2701 | dev_dbg(&cq->hwq.pdev->dev, |
2756 | "QPLIB: FP: Flushing SQ QP= %p", | 2702 | "QPLIB: FP: Flushing SQ QP= %p", |
@@ -2764,7 +2710,7 @@ int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq, | |||
2764 | qp); | 2710 | qp); |
2765 | __flush_rq(&qp->rq, qp, &cqe, &budget); | 2711 | __flush_rq(&qp->rq, qp, &cqe, &budget); |
2766 | } | 2712 | } |
2767 | spin_unlock_irqrestore(&cq->hwq.lock, flags); | 2713 | spin_unlock_irqrestore(&cq->flush_lock, flags); |
2768 | 2714 | ||
2769 | return num_cqes - budget; | 2715 | return num_cqes - budget; |
2770 | } | 2716 | } |
@@ -2773,11 +2719,9 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, | |||
2773 | int num_cqes, struct bnxt_qplib_qp **lib_qp) | 2719 | int num_cqes, struct bnxt_qplib_qp **lib_qp) |
2774 | { | 2720 | { |
2775 | struct cq_base *hw_cqe, **hw_cqe_ptr; | 2721 | struct cq_base *hw_cqe, **hw_cqe_ptr; |
2776 | unsigned long flags; | ||
2777 | u32 sw_cons, raw_cons; | 2722 | u32 sw_cons, raw_cons; |
2778 | int budget, rc = 0; | 2723 | int budget, rc = 0; |
2779 | 2724 | ||
2780 | spin_lock_irqsave(&cq->hwq.lock, flags); | ||
2781 | raw_cons = cq->hwq.cons; | 2725 | raw_cons = cq->hwq.cons; |
2782 | budget = num_cqes; | 2726 | budget = num_cqes; |
2783 | 2727 | ||
@@ -2853,20 +2797,15 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, | |||
2853 | bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ); | 2797 | bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ); |
2854 | } | 2798 | } |
2855 | exit: | 2799 | exit: |
2856 | spin_unlock_irqrestore(&cq->hwq.lock, flags); | ||
2857 | return num_cqes - budget; | 2800 | return num_cqes - budget; |
2858 | } | 2801 | } |
2859 | 2802 | ||
2860 | void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type) | 2803 | void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type) |
2861 | { | 2804 | { |
2862 | unsigned long flags; | ||
2863 | |||
2864 | spin_lock_irqsave(&cq->hwq.lock, flags); | ||
2865 | if (arm_type) | 2805 | if (arm_type) |
2866 | bnxt_qplib_arm_cq(cq, arm_type); | 2806 | bnxt_qplib_arm_cq(cq, arm_type); |
2867 | /* Using cq->arm_state variable to track whether to issue cq handler */ | 2807 | /* Using cq->arm_state variable to track whether to issue cq handler */ |
2868 | atomic_set(&cq->arm_state, 1); | 2808 | atomic_set(&cq->arm_state, 1); |
2869 | spin_unlock_irqrestore(&cq->hwq.lock, flags); | ||
2870 | } | 2809 | } |
2871 | 2810 | ||
2872 | void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp) | 2811 | void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp) |
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h index ca0a2ffa3509..ade9f13c0fd1 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h | |||
@@ -389,6 +389,18 @@ struct bnxt_qplib_cq { | |||
389 | struct list_head sqf_head, rqf_head; | 389 | struct list_head sqf_head, rqf_head; |
390 | atomic_t arm_state; | 390 | atomic_t arm_state; |
391 | spinlock_t compl_lock; /* synch CQ handlers */ | 391 | spinlock_t compl_lock; /* synch CQ handlers */ |
392 | /* Locking Notes: | ||
393 | * QP can move to error state from modify_qp, async error event or error | ||
394 | * CQE as part of poll_cq. When QP is moved to error state, it gets added | ||
395 | * to two flush lists, one each for SQ and RQ. | ||
396 | * Each flush list is protected by qplib_cq->flush_lock. Both scq and rcq | ||
397 | * flush_locks should be acquired when QP is moved to error. The control path | ||
398 | * operations(modify_qp and async error events) are synchronized with poll_cq | ||
399 | * using upper level CQ locks (bnxt_re_cq->cq_lock) of both SCQ and RCQ. | ||
400 | * The qplib_cq->flush_lock is required to synchronize two instances of poll_cq | ||
401 | * of the same QP while manipulating the flush list. | ||
402 | */ | ||
403 | spinlock_t flush_lock; /* QP flush management */ | ||
392 | }; | 404 | }; |
393 | 405 | ||
394 | #define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE sizeof(struct xrrq_irrq) | 406 | #define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE sizeof(struct xrrq_irrq) |
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 8329ec6a7946..80027a494730 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | |||
@@ -305,9 +305,8 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, | |||
305 | err_event->res_err_state_reason); | 305 | err_event->res_err_state_reason); |
306 | if (!qp) | 306 | if (!qp) |
307 | break; | 307 | break; |
308 | bnxt_qplib_acquire_cq_locks(qp, &flags); | ||
309 | bnxt_qplib_mark_qp_error(qp); | 308 | bnxt_qplib_mark_qp_error(qp); |
310 | bnxt_qplib_release_cq_locks(qp, &flags); | 309 | rcfw->aeq_handler(rcfw, qp_event, qp); |
311 | break; | 310 | break; |
312 | default: | 311 | default: |
313 | /* Command Response */ | 312 | /* Command Response */ |
@@ -460,7 +459,11 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, | |||
460 | int rc; | 459 | int rc; |
461 | 460 | ||
462 | RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags); | 461 | RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags); |
463 | 462 | /* Supply (log-base-2-of-host-page-size - base-page-shift) | |
463 | * to bono to adjust the doorbell page sizes. | ||
464 | */ | ||
465 | req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT - | ||
466 | RCFW_DBR_BASE_PAGE_SHIFT); | ||
464 | /* | 467 | /* |
465 | * VFs need not setup the HW context area, PF | 468 | * VFs need not setup the HW context area, PF |
466 | * shall setup this area for VF. Skipping the | 469 | * shall setup this area for VF. Skipping the |
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h index 6bee6e3636ea..c7cce2e4185e 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h | |||
@@ -49,6 +49,7 @@ | |||
49 | #define RCFW_COMM_SIZE 0x104 | 49 | #define RCFW_COMM_SIZE 0x104 |
50 | 50 | ||
51 | #define RCFW_DBR_PCI_BAR_REGION 2 | 51 | #define RCFW_DBR_PCI_BAR_REGION 2 |
52 | #define RCFW_DBR_BASE_PAGE_SHIFT 12 | ||
52 | 53 | ||
53 | #define RCFW_CMD_PREP(req, CMD, cmd_flags) \ | 54 | #define RCFW_CMD_PREP(req, CMD, cmd_flags) \ |
54 | do { \ | 55 | do { \ |
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index 03057983341f..ee98e5efef84 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c | |||
@@ -139,7 +139,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, | |||
139 | attr->max_pkey = le32_to_cpu(sb->max_pkeys); | 139 | attr->max_pkey = le32_to_cpu(sb->max_pkeys); |
140 | 140 | ||
141 | attr->max_inline_data = le32_to_cpu(sb->max_inline_data); | 141 | attr->max_inline_data = le32_to_cpu(sb->max_inline_data); |
142 | attr->l2_db_size = (sb->l2_db_space_size + 1) * PAGE_SIZE; | 142 | attr->l2_db_size = (sb->l2_db_space_size + 1) * |
143 | (0x01 << RCFW_DBR_BASE_PAGE_SHIFT); | ||
143 | attr->max_sgid = le32_to_cpu(sb->max_gid); | 144 | attr->max_sgid = le32_to_cpu(sb->max_gid); |
144 | 145 | ||
145 | bnxt_qplib_query_version(rcfw, attr->fw_ver); | 146 | bnxt_qplib_query_version(rcfw, attr->fw_ver); |
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h index 2d7ea096a247..3e5a4f760d0e 100644 --- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h +++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h | |||
@@ -1761,7 +1761,30 @@ struct cmdq_initialize_fw { | |||
1761 | #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_2M (0x3UL << 4) | 1761 | #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_2M (0x3UL << 4) |
1762 | #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_8M (0x4UL << 4) | 1762 | #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_8M (0x4UL << 4) |
1763 | #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_1G (0x5UL << 4) | 1763 | #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_1G (0x5UL << 4) |
1764 | __le16 reserved16; | 1764 | /* This value is (log-base-2-of-DBR-page-size - 12). |
1765 | * 0 for 4KB. HW supported values are enumerated below. | ||
1766 | */ | ||
1767 | __le16 log2_dbr_pg_size; | ||
1768 | #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_MASK 0xfUL | ||
1769 | #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_SFT 0 | ||
1770 | #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4K 0x0UL | ||
1771 | #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8K 0x1UL | ||
1772 | #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16K 0x2UL | ||
1773 | #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32K 0x3UL | ||
1774 | #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64K 0x4UL | ||
1775 | #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128K 0x5UL | ||
1776 | #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_256K 0x6UL | ||
1777 | #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_512K 0x7UL | ||
1778 | #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_1M 0x8UL | ||
1779 | #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_2M 0x9UL | ||
1780 | #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4M 0xaUL | ||
1781 | #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8M 0xbUL | ||
1782 | #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16M 0xcUL | ||
1783 | #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32M 0xdUL | ||
1784 | #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64M 0xeUL | ||
1785 | #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M 0xfUL | ||
1786 | #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_LAST \ | ||
1787 | CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M | ||
1765 | __le64 qpc_page_dir; | 1788 | __le64 qpc_page_dir; |
1766 | __le64 mrw_page_dir; | 1789 | __le64 mrw_page_dir; |
1767 | __le64 srq_page_dir; | 1790 | __le64 srq_page_dir; |
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index 9a566ee3ceff..82adc0d1d30e 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c | |||
@@ -601,6 +601,7 @@ static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct | |||
601 | wc->dlid_path_bits = 0; | 601 | wc->dlid_path_bits = 0; |
602 | 602 | ||
603 | if (is_eth) { | 603 | if (is_eth) { |
604 | wc->slid = 0; | ||
604 | wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid); | 605 | wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid); |
605 | memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4); | 606 | memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4); |
606 | memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2); | 607 | memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2); |
@@ -851,7 +852,6 @@ repoll: | |||
851 | } | 852 | } |
852 | } | 853 | } |
853 | 854 | ||
854 | wc->slid = be16_to_cpu(cqe->rlid); | ||
855 | g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn); | 855 | g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn); |
856 | wc->src_qp = g_mlpath_rqpn & 0xffffff; | 856 | wc->src_qp = g_mlpath_rqpn & 0xffffff; |
857 | wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f; | 857 | wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f; |
@@ -860,6 +860,7 @@ repoll: | |||
860 | wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status, | 860 | wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status, |
861 | cqe->checksum) ? IB_WC_IP_CSUM_OK : 0; | 861 | cqe->checksum) ? IB_WC_IP_CSUM_OK : 0; |
862 | if (is_eth) { | 862 | if (is_eth) { |
863 | wc->slid = 0; | ||
863 | wc->sl = be16_to_cpu(cqe->sl_vid) >> 13; | 864 | wc->sl = be16_to_cpu(cqe->sl_vid) >> 13; |
864 | if (be32_to_cpu(cqe->vlan_my_qpn) & | 865 | if (be32_to_cpu(cqe->vlan_my_qpn) & |
865 | MLX4_CQE_CVLAN_PRESENT_MASK) { | 866 | MLX4_CQE_CVLAN_PRESENT_MASK) { |
@@ -871,6 +872,7 @@ repoll: | |||
871 | memcpy(wc->smac, cqe->smac, ETH_ALEN); | 872 | memcpy(wc->smac, cqe->smac, ETH_ALEN); |
872 | wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC); | 873 | wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC); |
873 | } else { | 874 | } else { |
875 | wc->slid = be16_to_cpu(cqe->rlid); | ||
874 | wc->sl = be16_to_cpu(cqe->sl_vid) >> 12; | 876 | wc->sl = be16_to_cpu(cqe->sl_vid) >> 12; |
875 | wc->vlan_id = 0xffff; | 877 | wc->vlan_id = 0xffff; |
876 | } | 878 | } |
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 8d2ee9322f2e..5a0e4fc4785a 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
@@ -219,8 +219,6 @@ static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids, | |||
219 | gid_tbl[i].version = 2; | 219 | gid_tbl[i].version = 2; |
220 | if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid)) | 220 | if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid)) |
221 | gid_tbl[i].type = 1; | 221 | gid_tbl[i].type = 1; |
222 | else | ||
223 | memset(&gid_tbl[i].gid, 0, 12); | ||
224 | } | 222 | } |
225 | } | 223 | } |
226 | 224 | ||
@@ -366,8 +364,13 @@ static int mlx4_ib_del_gid(struct ib_device *device, | |||
366 | if (!gids) { | 364 | if (!gids) { |
367 | ret = -ENOMEM; | 365 | ret = -ENOMEM; |
368 | } else { | 366 | } else { |
369 | for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) | 367 | for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) { |
370 | memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid)); | 368 | memcpy(&gids[i].gid, |
369 | &port_gid_table->gids[i].gid, | ||
370 | sizeof(union ib_gid)); | ||
371 | gids[i].gid_type = | ||
372 | port_gid_table->gids[i].gid_type; | ||
373 | } | ||
371 | } | 374 | } |
372 | } | 375 | } |
373 | spin_unlock_bh(&iboe->lock); | 376 | spin_unlock_bh(&iboe->lock); |
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 5b974fb97611..15457c9569a7 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c | |||
@@ -226,7 +226,6 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, | |||
226 | wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey); | 226 | wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey); |
227 | break; | 227 | break; |
228 | } | 228 | } |
229 | wc->slid = be16_to_cpu(cqe->slid); | ||
230 | wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff; | 229 | wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff; |
231 | wc->dlid_path_bits = cqe->ml_path; | 230 | wc->dlid_path_bits = cqe->ml_path; |
232 | g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3; | 231 | g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3; |
@@ -241,10 +240,12 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, | |||
241 | } | 240 | } |
242 | 241 | ||
243 | if (ll != IB_LINK_LAYER_ETHERNET) { | 242 | if (ll != IB_LINK_LAYER_ETHERNET) { |
243 | wc->slid = be16_to_cpu(cqe->slid); | ||
244 | wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf; | 244 | wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf; |
245 | return; | 245 | return; |
246 | } | 246 | } |
247 | 247 | ||
248 | wc->slid = 0; | ||
248 | vlan_present = cqe->l4_l3_hdr_type & 0x1; | 249 | vlan_present = cqe->l4_l3_hdr_type & 0x1; |
249 | roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3; | 250 | roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3; |
250 | if (vlan_present) { | 251 | if (vlan_present) { |
@@ -1177,7 +1178,12 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, | |||
1177 | if (ucmd.reserved0 || ucmd.reserved1) | 1178 | if (ucmd.reserved0 || ucmd.reserved1) |
1178 | return -EINVAL; | 1179 | return -EINVAL; |
1179 | 1180 | ||
1180 | umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size, | 1181 | /* check multiplication overflow */ |
1182 | if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1) | ||
1183 | return -EINVAL; | ||
1184 | |||
1185 | umem = ib_umem_get(context, ucmd.buf_addr, | ||
1186 | (size_t)ucmd.cqe_size * entries, | ||
1181 | IB_ACCESS_LOCAL_WRITE, 1); | 1187 | IB_ACCESS_LOCAL_WRITE, 1); |
1182 | if (IS_ERR(umem)) { | 1188 | if (IS_ERR(umem)) { |
1183 | err = PTR_ERR(umem); | 1189 | err = PTR_ERR(umem); |
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 4236c8086820..033b6af90de9 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c | |||
@@ -245,12 +245,16 @@ struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev, | |||
245 | struct mlx5_ib_multiport_info *mpi; | 245 | struct mlx5_ib_multiport_info *mpi; |
246 | struct mlx5_ib_port *port; | 246 | struct mlx5_ib_port *port; |
247 | 247 | ||
248 | if (!mlx5_core_mp_enabled(ibdev->mdev) || | ||
249 | ll != IB_LINK_LAYER_ETHERNET) { | ||
250 | if (native_port_num) | ||
251 | *native_port_num = ib_port_num; | ||
252 | return ibdev->mdev; | ||
253 | } | ||
254 | |||
248 | if (native_port_num) | 255 | if (native_port_num) |
249 | *native_port_num = 1; | 256 | *native_port_num = 1; |
250 | 257 | ||
251 | if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET) | ||
252 | return ibdev->mdev; | ||
253 | |||
254 | port = &ibdev->port[ib_port_num - 1]; | 258 | port = &ibdev->port[ib_port_num - 1]; |
255 | if (!port) | 259 | if (!port) |
256 | return NULL; | 260 | return NULL; |
@@ -3263,7 +3267,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work) | |||
3263 | struct mlx5_ib_dev *ibdev; | 3267 | struct mlx5_ib_dev *ibdev; |
3264 | struct ib_event ibev; | 3268 | struct ib_event ibev; |
3265 | bool fatal = false; | 3269 | bool fatal = false; |
3266 | u8 port = 0; | 3270 | u8 port = (u8)work->param; |
3267 | 3271 | ||
3268 | if (mlx5_core_is_mp_slave(work->dev)) { | 3272 | if (mlx5_core_is_mp_slave(work->dev)) { |
3269 | ibdev = mlx5_ib_get_ibdev_from_mpi(work->context); | 3273 | ibdev = mlx5_ib_get_ibdev_from_mpi(work->context); |
@@ -3283,8 +3287,6 @@ static void mlx5_ib_handle_event(struct work_struct *_work) | |||
3283 | case MLX5_DEV_EVENT_PORT_UP: | 3287 | case MLX5_DEV_EVENT_PORT_UP: |
3284 | case MLX5_DEV_EVENT_PORT_DOWN: | 3288 | case MLX5_DEV_EVENT_PORT_DOWN: |
3285 | case MLX5_DEV_EVENT_PORT_INITIALIZED: | 3289 | case MLX5_DEV_EVENT_PORT_INITIALIZED: |
3286 | port = (u8)work->param; | ||
3287 | |||
3288 | /* In RoCE, port up/down events are handled in | 3290 | /* In RoCE, port up/down events are handled in |
3289 | * mlx5_netdev_event(). | 3291 | * mlx5_netdev_event(). |
3290 | */ | 3292 | */ |
@@ -3298,24 +3300,19 @@ static void mlx5_ib_handle_event(struct work_struct *_work) | |||
3298 | 3300 | ||
3299 | case MLX5_DEV_EVENT_LID_CHANGE: | 3301 | case MLX5_DEV_EVENT_LID_CHANGE: |
3300 | ibev.event = IB_EVENT_LID_CHANGE; | 3302 | ibev.event = IB_EVENT_LID_CHANGE; |
3301 | port = (u8)work->param; | ||
3302 | break; | 3303 | break; |
3303 | 3304 | ||
3304 | case MLX5_DEV_EVENT_PKEY_CHANGE: | 3305 | case MLX5_DEV_EVENT_PKEY_CHANGE: |
3305 | ibev.event = IB_EVENT_PKEY_CHANGE; | 3306 | ibev.event = IB_EVENT_PKEY_CHANGE; |
3306 | port = (u8)work->param; | ||
3307 | |||
3308 | schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work); | 3307 | schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work); |
3309 | break; | 3308 | break; |
3310 | 3309 | ||
3311 | case MLX5_DEV_EVENT_GUID_CHANGE: | 3310 | case MLX5_DEV_EVENT_GUID_CHANGE: |
3312 | ibev.event = IB_EVENT_GID_CHANGE; | 3311 | ibev.event = IB_EVENT_GID_CHANGE; |
3313 | port = (u8)work->param; | ||
3314 | break; | 3312 | break; |
3315 | 3313 | ||
3316 | case MLX5_DEV_EVENT_CLIENT_REREG: | 3314 | case MLX5_DEV_EVENT_CLIENT_REREG: |
3317 | ibev.event = IB_EVENT_CLIENT_REREGISTER; | 3315 | ibev.event = IB_EVENT_CLIENT_REREGISTER; |
3318 | port = (u8)work->param; | ||
3319 | break; | 3316 | break; |
3320 | case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT: | 3317 | case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT: |
3321 | schedule_work(&ibdev->delay_drop.delay_drop_work); | 3318 | schedule_work(&ibdev->delay_drop.delay_drop_work); |
@@ -3327,7 +3324,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work) | |||
3327 | ibev.device = &ibdev->ib_dev; | 3324 | ibev.device = &ibdev->ib_dev; |
3328 | ibev.element.port_num = port; | 3325 | ibev.element.port_num = port; |
3329 | 3326 | ||
3330 | if (port < 1 || port > ibdev->num_ports) { | 3327 | if (!rdma_is_port_valid(&ibdev->ib_dev, port)) { |
3331 | mlx5_ib_warn(ibdev, "warning: event on port %d\n", port); | 3328 | mlx5_ib_warn(ibdev, "warning: event on port %d\n", port); |
3332 | goto out; | 3329 | goto out; |
3333 | } | 3330 | } |
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 556e015678de..1961c6a45437 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c | |||
@@ -1816,7 +1816,6 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr, | |||
1816 | 1816 | ||
1817 | mr->ibmr.iova = sg_dma_address(sg) + sg_offset; | 1817 | mr->ibmr.iova = sg_dma_address(sg) + sg_offset; |
1818 | mr->ibmr.length = 0; | 1818 | mr->ibmr.length = 0; |
1819 | mr->ndescs = sg_nents; | ||
1820 | 1819 | ||
1821 | for_each_sg(sgl, sg, sg_nents, i) { | 1820 | for_each_sg(sgl, sg, sg_nents, i) { |
1822 | if (unlikely(i >= mr->max_descs)) | 1821 | if (unlikely(i >= mr->max_descs)) |
@@ -1828,6 +1827,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr, | |||
1828 | 1827 | ||
1829 | sg_offset = 0; | 1828 | sg_offset = 0; |
1830 | } | 1829 | } |
1830 | mr->ndescs = i; | ||
1831 | 1831 | ||
1832 | if (sg_offset_p) | 1832 | if (sg_offset_p) |
1833 | *sg_offset_p = sg_offset; | 1833 | *sg_offset_p = sg_offset; |
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 39d24bf694a8..36197fbac63a 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c | |||
@@ -1584,6 +1584,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, | |||
1584 | u32 uidx = MLX5_IB_DEFAULT_UIDX; | 1584 | u32 uidx = MLX5_IB_DEFAULT_UIDX; |
1585 | struct mlx5_ib_create_qp ucmd; | 1585 | struct mlx5_ib_create_qp ucmd; |
1586 | struct mlx5_ib_qp_base *base; | 1586 | struct mlx5_ib_qp_base *base; |
1587 | int mlx5_st; | ||
1587 | void *qpc; | 1588 | void *qpc; |
1588 | u32 *in; | 1589 | u32 *in; |
1589 | int err; | 1590 | int err; |
@@ -1592,6 +1593,10 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, | |||
1592 | spin_lock_init(&qp->sq.lock); | 1593 | spin_lock_init(&qp->sq.lock); |
1593 | spin_lock_init(&qp->rq.lock); | 1594 | spin_lock_init(&qp->rq.lock); |
1594 | 1595 | ||
1596 | mlx5_st = to_mlx5_st(init_attr->qp_type); | ||
1597 | if (mlx5_st < 0) | ||
1598 | return -EINVAL; | ||
1599 | |||
1595 | if (init_attr->rwq_ind_tbl) { | 1600 | if (init_attr->rwq_ind_tbl) { |
1596 | if (!udata) | 1601 | if (!udata) |
1597 | return -ENOSYS; | 1602 | return -ENOSYS; |
@@ -1753,7 +1758,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, | |||
1753 | 1758 | ||
1754 | qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); | 1759 | qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); |
1755 | 1760 | ||
1756 | MLX5_SET(qpc, qpc, st, to_mlx5_st(init_attr->qp_type)); | 1761 | MLX5_SET(qpc, qpc, st, mlx5_st); |
1757 | MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); | 1762 | MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); |
1758 | 1763 | ||
1759 | if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR) | 1764 | if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR) |
@@ -3095,8 +3100,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, | |||
3095 | goto out; | 3100 | goto out; |
3096 | 3101 | ||
3097 | if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE || | 3102 | if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE || |
3098 | !optab[mlx5_cur][mlx5_new]) | 3103 | !optab[mlx5_cur][mlx5_new]) { |
3104 | err = -EINVAL; | ||
3099 | goto out; | 3105 | goto out; |
3106 | } | ||
3100 | 3107 | ||
3101 | op = optab[mlx5_cur][mlx5_new]; | 3108 | op = optab[mlx5_cur][mlx5_new]; |
3102 | optpar = ib_mask_to_mlx5_opt(attr_mask); | 3109 | optpar = ib_mask_to_mlx5_opt(attr_mask); |
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c index 478b7317b80a..26dc374787f7 100644 --- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c +++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c | |||
@@ -458,8 +458,7 @@ qedr_addr6_resolve(struct qedr_dev *dev, | |||
458 | } | 458 | } |
459 | return -EINVAL; | 459 | return -EINVAL; |
460 | } | 460 | } |
461 | neigh = dst_neigh_lookup(dst, &dst_in); | 461 | neigh = dst_neigh_lookup(dst, &fl6.daddr); |
462 | |||
463 | if (neigh) { | 462 | if (neigh) { |
464 | rcu_read_lock(); | 463 | rcu_read_lock(); |
465 | if (neigh->nud_state & NUD_VALID) { | 464 | if (neigh->nud_state & NUD_VALID) { |
@@ -494,10 +493,14 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
494 | 493 | ||
495 | qp = idr_find(&dev->qpidr, conn_param->qpn); | 494 | qp = idr_find(&dev->qpidr, conn_param->qpn); |
496 | 495 | ||
497 | laddr = (struct sockaddr_in *)&cm_id->local_addr; | 496 | laddr = (struct sockaddr_in *)&cm_id->m_local_addr; |
498 | raddr = (struct sockaddr_in *)&cm_id->remote_addr; | 497 | raddr = (struct sockaddr_in *)&cm_id->m_remote_addr; |
499 | laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr; | 498 | laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr; |
500 | raddr6 = (struct sockaddr_in6 *)&cm_id->remote_addr; | 499 | raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr; |
500 | |||
501 | DP_DEBUG(dev, QEDR_MSG_IWARP, "MAPPED %d %d\n", | ||
502 | ntohs(((struct sockaddr_in *)&cm_id->remote_addr)->sin_port), | ||
503 | ntohs(raddr->sin_port)); | ||
501 | 504 | ||
502 | DP_DEBUG(dev, QEDR_MSG_IWARP, | 505 | DP_DEBUG(dev, QEDR_MSG_IWARP, |
503 | "Connect source address: %pISpc, remote address: %pISpc\n", | 506 | "Connect source address: %pISpc, remote address: %pISpc\n", |
@@ -599,8 +602,8 @@ int qedr_iw_create_listen(struct iw_cm_id *cm_id, int backlog) | |||
599 | int rc; | 602 | int rc; |
600 | int i; | 603 | int i; |
601 | 604 | ||
602 | laddr = (struct sockaddr_in *)&cm_id->local_addr; | 605 | laddr = (struct sockaddr_in *)&cm_id->m_local_addr; |
603 | laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr; | 606 | laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr; |
604 | 607 | ||
605 | DP_DEBUG(dev, QEDR_MSG_IWARP, | 608 | DP_DEBUG(dev, QEDR_MSG_IWARP, |
606 | "Create Listener address: %pISpc\n", &cm_id->local_addr); | 609 | "Create Listener address: %pISpc\n", &cm_id->local_addr); |
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 53f00dbf313f..875b17272d65 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c | |||
@@ -3034,6 +3034,11 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
3034 | 3034 | ||
3035 | switch (wr->opcode) { | 3035 | switch (wr->opcode) { |
3036 | case IB_WR_SEND_WITH_IMM: | 3036 | case IB_WR_SEND_WITH_IMM: |
3037 | if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) { | ||
3038 | rc = -EINVAL; | ||
3039 | *bad_wr = wr; | ||
3040 | break; | ||
3041 | } | ||
3037 | wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM; | 3042 | wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM; |
3038 | swqe = (struct rdma_sq_send_wqe_1st *)wqe; | 3043 | swqe = (struct rdma_sq_send_wqe_1st *)wqe; |
3039 | swqe->wqe_size = 2; | 3044 | swqe->wqe_size = 2; |
@@ -3075,6 +3080,11 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
3075 | break; | 3080 | break; |
3076 | 3081 | ||
3077 | case IB_WR_RDMA_WRITE_WITH_IMM: | 3082 | case IB_WR_RDMA_WRITE_WITH_IMM: |
3083 | if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) { | ||
3084 | rc = -EINVAL; | ||
3085 | *bad_wr = wr; | ||
3086 | break; | ||
3087 | } | ||
3078 | wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM; | 3088 | wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM; |
3079 | rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe; | 3089 | rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe; |
3080 | 3090 | ||
@@ -3724,7 +3734,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) | |||
3724 | { | 3734 | { |
3725 | struct qedr_dev *dev = get_qedr_dev(ibcq->device); | 3735 | struct qedr_dev *dev = get_qedr_dev(ibcq->device); |
3726 | struct qedr_cq *cq = get_qedr_cq(ibcq); | 3736 | struct qedr_cq *cq = get_qedr_cq(ibcq); |
3727 | union rdma_cqe *cqe = cq->latest_cqe; | 3737 | union rdma_cqe *cqe; |
3728 | u32 old_cons, new_cons; | 3738 | u32 old_cons, new_cons; |
3729 | unsigned long flags; | 3739 | unsigned long flags; |
3730 | int update = 0; | 3740 | int update = 0; |
@@ -3741,6 +3751,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) | |||
3741 | return qedr_gsi_poll_cq(ibcq, num_entries, wc); | 3751 | return qedr_gsi_poll_cq(ibcq, num_entries, wc); |
3742 | 3752 | ||
3743 | spin_lock_irqsave(&cq->cq_lock, flags); | 3753 | spin_lock_irqsave(&cq->cq_lock, flags); |
3754 | cqe = cq->latest_cqe; | ||
3744 | old_cons = qed_chain_get_cons_idx_u32(&cq->pbl); | 3755 | old_cons = qed_chain_get_cons_idx_u32(&cq->pbl); |
3745 | while (num_entries && is_valid_cqe(cq, cqe)) { | 3756 | while (num_entries && is_valid_cqe(cq, cqe)) { |
3746 | struct qedr_qp *qp; | 3757 | struct qedr_qp *qp; |
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c index 1b2e5362a3ff..cc429b567d0a 100644 --- a/drivers/infiniband/sw/rdmavt/mr.c +++ b/drivers/infiniband/sw/rdmavt/mr.c | |||
@@ -489,11 +489,13 @@ static int rvt_check_refs(struct rvt_mregion *mr, const char *t) | |||
489 | unsigned long timeout; | 489 | unsigned long timeout; |
490 | struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device); | 490 | struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device); |
491 | 491 | ||
492 | if (percpu_ref_is_zero(&mr->refcount)) | 492 | if (mr->lkey) { |
493 | return 0; | 493 | /* avoid dma mr */ |
494 | /* avoid dma mr */ | ||
495 | if (mr->lkey) | ||
496 | rvt_dereg_clean_qps(mr); | 494 | rvt_dereg_clean_qps(mr); |
495 | /* @mr was indexed on rcu protected @lkey_table */ | ||
496 | synchronize_rcu(); | ||
497 | } | ||
498 | |||
497 | timeout = wait_for_completion_timeout(&mr->comp, 5 * HZ); | 499 | timeout = wait_for_completion_timeout(&mr->comp, 5 * HZ); |
498 | if (!timeout) { | 500 | if (!timeout) { |
499 | rvt_pr_err(rdi, | 501 | rvt_pr_err(rdi, |
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c index 1f316d66e6f7..41614c185918 100644 --- a/drivers/input/keyboard/matrix_keypad.c +++ b/drivers/input/keyboard/matrix_keypad.c | |||
@@ -218,8 +218,10 @@ static void matrix_keypad_stop(struct input_dev *dev) | |||
218 | { | 218 | { |
219 | struct matrix_keypad *keypad = input_get_drvdata(dev); | 219 | struct matrix_keypad *keypad = input_get_drvdata(dev); |
220 | 220 | ||
221 | spin_lock_irq(&keypad->lock); | ||
221 | keypad->stopped = true; | 222 | keypad->stopped = true; |
222 | mb(); | 223 | spin_unlock_irq(&keypad->lock); |
224 | |||
223 | flush_work(&keypad->work.work); | 225 | flush_work(&keypad->work.work); |
224 | /* | 226 | /* |
225 | * matrix_keypad_scan() will leave IRQs enabled; | 227 | * matrix_keypad_scan() will leave IRQs enabled; |
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 3d2e23a0ae39..a246fc686bb7 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c | |||
@@ -173,7 +173,6 @@ static const char * const smbus_pnp_ids[] = { | |||
173 | "LEN0046", /* X250 */ | 173 | "LEN0046", /* X250 */ |
174 | "LEN004a", /* W541 */ | 174 | "LEN004a", /* W541 */ |
175 | "LEN200f", /* T450s */ | 175 | "LEN200f", /* T450s */ |
176 | "LEN2018", /* T460p */ | ||
177 | NULL | 176 | NULL |
178 | }; | 177 | }; |
179 | 178 | ||
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c index db4f6bb502e3..a5ab774da4cc 100644 --- a/drivers/input/touchscreen/mms114.c +++ b/drivers/input/touchscreen/mms114.c | |||
@@ -1,11 +1,8 @@ | |||
1 | /* | 1 | // SPDX-License-Identifier: GPL-2.0 |
2 | * Copyright (C) 2012 Samsung Electronics Co.Ltd | 2 | // Melfas MMS114/MMS152 touchscreen device driver |
3 | * Author: Joonyoung Shim <jy0922.shim@samsung.com> | 3 | // |
4 | * | 4 | // Copyright (c) 2012 Samsung Electronics Co., Ltd. |
5 | * This program is free software; you can redistribute it and/or modify | 5 | // Author: Joonyoung Shim <jy0922.shim@samsung.com> |
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | */ | ||
9 | 6 | ||
10 | #include <linux/module.h> | 7 | #include <linux/module.h> |
11 | #include <linux/delay.h> | 8 | #include <linux/delay.h> |
@@ -624,4 +621,4 @@ module_i2c_driver(mms114_driver); | |||
624 | /* Module information */ | 621 | /* Module information */ |
625 | MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>"); | 622 | MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>"); |
626 | MODULE_DESCRIPTION("MELFAS mms114 Touchscreen driver"); | 623 | MODULE_DESCRIPTION("MELFAS mms114 Touchscreen driver"); |
627 | MODULE_LICENSE("GPL"); | 624 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 1d3056f53747..2cbb19cddbf8 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c | |||
@@ -1412,7 +1412,7 @@ static struct irq_chip its_irq_chip = { | |||
1412 | * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations. | 1412 | * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations. |
1413 | */ | 1413 | */ |
1414 | #define IRQS_PER_CHUNK_SHIFT 5 | 1414 | #define IRQS_PER_CHUNK_SHIFT 5 |
1415 | #define IRQS_PER_CHUNK (1 << IRQS_PER_CHUNK_SHIFT) | 1415 | #define IRQS_PER_CHUNK (1UL << IRQS_PER_CHUNK_SHIFT) |
1416 | #define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */ | 1416 | #define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */ |
1417 | 1417 | ||
1418 | static unsigned long *lpi_bitmap; | 1418 | static unsigned long *lpi_bitmap; |
@@ -2119,11 +2119,10 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, | |||
2119 | 2119 | ||
2120 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); | 2120 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); |
2121 | /* | 2121 | /* |
2122 | * At least one bit of EventID is being used, hence a minimum | 2122 | * We allocate at least one chunk worth of LPIs bet device, |
2123 | * of two entries. No, the architecture doesn't let you | 2123 | * and thus that many ITEs. The device may require less though. |
2124 | * express an ITT with a single entry. | ||
2125 | */ | 2124 | */ |
2126 | nr_ites = max(2UL, roundup_pow_of_two(nvecs)); | 2125 | nr_ites = max(IRQS_PER_CHUNK, roundup_pow_of_two(nvecs)); |
2127 | sz = nr_ites * its->ite_size; | 2126 | sz = nr_ites * its->ite_size; |
2128 | sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; | 2127 | sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; |
2129 | itt = kzalloc(sz, GFP_KERNEL); | 2128 | itt = kzalloc(sz, GFP_KERNEL); |
@@ -2495,7 +2494,7 @@ static int its_vpe_set_affinity(struct irq_data *d, | |||
2495 | 2494 | ||
2496 | static void its_vpe_schedule(struct its_vpe *vpe) | 2495 | static void its_vpe_schedule(struct its_vpe *vpe) |
2497 | { | 2496 | { |
2498 | void * __iomem vlpi_base = gic_data_rdist_vlpi_base(); | 2497 | void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); |
2499 | u64 val; | 2498 | u64 val; |
2500 | 2499 | ||
2501 | /* Schedule the VPE */ | 2500 | /* Schedule the VPE */ |
@@ -2527,7 +2526,7 @@ static void its_vpe_schedule(struct its_vpe *vpe) | |||
2527 | 2526 | ||
2528 | static void its_vpe_deschedule(struct its_vpe *vpe) | 2527 | static void its_vpe_deschedule(struct its_vpe *vpe) |
2529 | { | 2528 | { |
2530 | void * __iomem vlpi_base = gic_data_rdist_vlpi_base(); | 2529 | void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); |
2531 | u32 count = 1000000; /* 1s! */ | 2530 | u32 count = 1000000; /* 1s! */ |
2532 | bool clean; | 2531 | bool clean; |
2533 | u64 val; | 2532 | u64 val; |
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c index 675eda5ff2b8..4760307ab43f 100644 --- a/drivers/irqchip/irq-imx-gpcv2.c +++ b/drivers/irqchip/irq-imx-gpcv2.c | |||
@@ -28,20 +28,6 @@ struct gpcv2_irqchip_data { | |||
28 | 28 | ||
29 | static struct gpcv2_irqchip_data *imx_gpcv2_instance; | 29 | static struct gpcv2_irqchip_data *imx_gpcv2_instance; |
30 | 30 | ||
31 | /* | ||
32 | * Interface for the low level wakeup code. | ||
33 | */ | ||
34 | u32 imx_gpcv2_get_wakeup_source(u32 **sources) | ||
35 | { | ||
36 | if (!imx_gpcv2_instance) | ||
37 | return 0; | ||
38 | |||
39 | if (sources) | ||
40 | *sources = imx_gpcv2_instance->wakeup_sources; | ||
41 | |||
42 | return IMR_NUM; | ||
43 | } | ||
44 | |||
45 | static int gpcv2_wakeup_source_save(void) | 31 | static int gpcv2_wakeup_source_save(void) |
46 | { | 32 | { |
47 | struct gpcv2_irqchip_data *cd; | 33 | struct gpcv2_irqchip_data *cd; |
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 1a46b41dac70..6422846b546e 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c | |||
@@ -659,11 +659,11 @@ static void do_bio_hook(struct search *s, struct bio *orig_bio) | |||
659 | static void search_free(struct closure *cl) | 659 | static void search_free(struct closure *cl) |
660 | { | 660 | { |
661 | struct search *s = container_of(cl, struct search, cl); | 661 | struct search *s = container_of(cl, struct search, cl); |
662 | bio_complete(s); | ||
663 | 662 | ||
664 | if (s->iop.bio) | 663 | if (s->iop.bio) |
665 | bio_put(s->iop.bio); | 664 | bio_put(s->iop.bio); |
666 | 665 | ||
666 | bio_complete(s); | ||
667 | closure_debug_destroy(cl); | 667 | closure_debug_destroy(cl); |
668 | mempool_free(s, s->d->c->search); | 668 | mempool_free(s, s->d->c->search); |
669 | } | 669 | } |
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 312895788036..f2273143b3cb 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c | |||
@@ -963,6 +963,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, | |||
963 | uint32_t rtime = cpu_to_le32(get_seconds()); | 963 | uint32_t rtime = cpu_to_le32(get_seconds()); |
964 | struct uuid_entry *u; | 964 | struct uuid_entry *u; |
965 | char buf[BDEVNAME_SIZE]; | 965 | char buf[BDEVNAME_SIZE]; |
966 | struct cached_dev *exist_dc, *t; | ||
966 | 967 | ||
967 | bdevname(dc->bdev, buf); | 968 | bdevname(dc->bdev, buf); |
968 | 969 | ||
@@ -987,6 +988,16 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, | |||
987 | return -EINVAL; | 988 | return -EINVAL; |
988 | } | 989 | } |
989 | 990 | ||
991 | /* Check whether already attached */ | ||
992 | list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) { | ||
993 | if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) { | ||
994 | pr_err("Tried to attach %s but duplicate UUID already attached", | ||
995 | buf); | ||
996 | |||
997 | return -EINVAL; | ||
998 | } | ||
999 | } | ||
1000 | |||
990 | u = uuid_find(c, dc->sb.uuid); | 1001 | u = uuid_find(c, dc->sb.uuid); |
991 | 1002 | ||
992 | if (u && | 1003 | if (u && |
@@ -1204,7 +1215,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page, | |||
1204 | 1215 | ||
1205 | return; | 1216 | return; |
1206 | err: | 1217 | err: |
1207 | pr_notice("error opening %s: %s", bdevname(bdev, name), err); | 1218 | pr_notice("error %s: %s", bdevname(bdev, name), err); |
1208 | bcache_device_stop(&dc->disk); | 1219 | bcache_device_stop(&dc->disk); |
1209 | } | 1220 | } |
1210 | 1221 | ||
@@ -1274,7 +1285,7 @@ static int flash_devs_run(struct cache_set *c) | |||
1274 | struct uuid_entry *u; | 1285 | struct uuid_entry *u; |
1275 | 1286 | ||
1276 | for (u = c->uuids; | 1287 | for (u = c->uuids; |
1277 | u < c->uuids + c->devices_max_used && !ret; | 1288 | u < c->uuids + c->nr_uuids && !ret; |
1278 | u++) | 1289 | u++) |
1279 | if (UUID_FLASH_ONLY(u)) | 1290 | if (UUID_FLASH_ONLY(u)) |
1280 | ret = flash_dev_run(c, u); | 1291 | ret = flash_dev_run(c, u); |
@@ -1883,6 +1894,8 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page, | |||
1883 | const char *err = NULL; /* must be set for any error case */ | 1894 | const char *err = NULL; /* must be set for any error case */ |
1884 | int ret = 0; | 1895 | int ret = 0; |
1885 | 1896 | ||
1897 | bdevname(bdev, name); | ||
1898 | |||
1886 | memcpy(&ca->sb, sb, sizeof(struct cache_sb)); | 1899 | memcpy(&ca->sb, sb, sizeof(struct cache_sb)); |
1887 | ca->bdev = bdev; | 1900 | ca->bdev = bdev; |
1888 | ca->bdev->bd_holder = ca; | 1901 | ca->bdev->bd_holder = ca; |
@@ -1891,11 +1904,12 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page, | |||
1891 | bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page; | 1904 | bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page; |
1892 | get_page(sb_page); | 1905 | get_page(sb_page); |
1893 | 1906 | ||
1894 | if (blk_queue_discard(bdev_get_queue(ca->bdev))) | 1907 | if (blk_queue_discard(bdev_get_queue(bdev))) |
1895 | ca->discard = CACHE_DISCARD(&ca->sb); | 1908 | ca->discard = CACHE_DISCARD(&ca->sb); |
1896 | 1909 | ||
1897 | ret = cache_alloc(ca); | 1910 | ret = cache_alloc(ca); |
1898 | if (ret != 0) { | 1911 | if (ret != 0) { |
1912 | blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); | ||
1899 | if (ret == -ENOMEM) | 1913 | if (ret == -ENOMEM) |
1900 | err = "cache_alloc(): -ENOMEM"; | 1914 | err = "cache_alloc(): -ENOMEM"; |
1901 | else | 1915 | else |
@@ -1918,14 +1932,14 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page, | |||
1918 | goto out; | 1932 | goto out; |
1919 | } | 1933 | } |
1920 | 1934 | ||
1921 | pr_info("registered cache device %s", bdevname(bdev, name)); | 1935 | pr_info("registered cache device %s", name); |
1922 | 1936 | ||
1923 | out: | 1937 | out: |
1924 | kobject_put(&ca->kobj); | 1938 | kobject_put(&ca->kobj); |
1925 | 1939 | ||
1926 | err: | 1940 | err: |
1927 | if (err) | 1941 | if (err) |
1928 | pr_notice("error opening %s: %s", bdevname(bdev, name), err); | 1942 | pr_notice("error %s: %s", name, err); |
1929 | 1943 | ||
1930 | return ret; | 1944 | return ret; |
1931 | } | 1945 | } |
@@ -2014,6 +2028,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, | |||
2014 | if (err) | 2028 | if (err) |
2015 | goto err_close; | 2029 | goto err_close; |
2016 | 2030 | ||
2031 | err = "failed to register device"; | ||
2017 | if (SB_IS_BDEV(sb)) { | 2032 | if (SB_IS_BDEV(sb)) { |
2018 | struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL); | 2033 | struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL); |
2019 | if (!dc) | 2034 | if (!dc) |
@@ -2028,7 +2043,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, | |||
2028 | goto err_close; | 2043 | goto err_close; |
2029 | 2044 | ||
2030 | if (register_cache(sb, sb_page, bdev, ca) != 0) | 2045 | if (register_cache(sb, sb_page, bdev, ca) != 0) |
2031 | goto err_close; | 2046 | goto err; |
2032 | } | 2047 | } |
2033 | out: | 2048 | out: |
2034 | if (sb_page) | 2049 | if (sb_page) |
@@ -2041,7 +2056,7 @@ out: | |||
2041 | err_close: | 2056 | err_close: |
2042 | blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); | 2057 | blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); |
2043 | err: | 2058 | err: |
2044 | pr_info("error opening %s: %s", path, err); | 2059 | pr_info("error %s: %s", path, err); |
2045 | ret = -EINVAL; | 2060 | ret = -EINVAL; |
2046 | goto out; | 2061 | goto out; |
2047 | } | 2062 | } |
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 414c9af54ded..aa2032fa80d4 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c | |||
@@ -386,9 +386,6 @@ static void __cache_size_refresh(void) | |||
386 | static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask, | 386 | static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask, |
387 | enum data_mode *data_mode) | 387 | enum data_mode *data_mode) |
388 | { | 388 | { |
389 | unsigned noio_flag; | ||
390 | void *ptr; | ||
391 | |||
392 | if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) { | 389 | if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) { |
393 | *data_mode = DATA_MODE_SLAB; | 390 | *data_mode = DATA_MODE_SLAB; |
394 | return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask); | 391 | return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask); |
@@ -412,16 +409,15 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask, | |||
412 | * all allocations done by this process (including pagetables) are done | 409 | * all allocations done by this process (including pagetables) are done |
413 | * as if GFP_NOIO was specified. | 410 | * as if GFP_NOIO was specified. |
414 | */ | 411 | */ |
412 | if (gfp_mask & __GFP_NORETRY) { | ||
413 | unsigned noio_flag = memalloc_noio_save(); | ||
414 | void *ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL); | ||
415 | 415 | ||
416 | if (gfp_mask & __GFP_NORETRY) | ||
417 | noio_flag = memalloc_noio_save(); | ||
418 | |||
419 | ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL); | ||
420 | |||
421 | if (gfp_mask & __GFP_NORETRY) | ||
422 | memalloc_noio_restore(noio_flag); | 416 | memalloc_noio_restore(noio_flag); |
417 | return ptr; | ||
418 | } | ||
423 | 419 | ||
424 | return ptr; | 420 | return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL); |
425 | } | 421 | } |
426 | 422 | ||
427 | /* | 423 | /* |
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 7d3e572072f5..a05a560d3cba 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c | |||
@@ -211,29 +211,27 @@ static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m) | |||
211 | else | 211 | else |
212 | m->queue_mode = DM_TYPE_REQUEST_BASED; | 212 | m->queue_mode = DM_TYPE_REQUEST_BASED; |
213 | 213 | ||
214 | } else if (m->queue_mode == DM_TYPE_BIO_BASED || | 214 | } else if (m->queue_mode == DM_TYPE_BIO_BASED) { |
215 | m->queue_mode == DM_TYPE_NVME_BIO_BASED) { | ||
216 | INIT_WORK(&m->process_queued_bios, process_queued_bios); | 215 | INIT_WORK(&m->process_queued_bios, process_queued_bios); |
217 | 216 | /* | |
218 | if (m->queue_mode == DM_TYPE_BIO_BASED) { | 217 | * bio-based doesn't support any direct scsi_dh management; |
219 | /* | 218 | * it just discovers if a scsi_dh is attached. |
220 | * bio-based doesn't support any direct scsi_dh management; | 219 | */ |
221 | * it just discovers if a scsi_dh is attached. | 220 | set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags); |
222 | */ | ||
223 | set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags); | ||
224 | } | ||
225 | } | ||
226 | |||
227 | if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) { | ||
228 | set_bit(MPATHF_QUEUE_IO, &m->flags); | ||
229 | atomic_set(&m->pg_init_in_progress, 0); | ||
230 | atomic_set(&m->pg_init_count, 0); | ||
231 | m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT; | ||
232 | init_waitqueue_head(&m->pg_init_wait); | ||
233 | } | 221 | } |
234 | 222 | ||
235 | dm_table_set_type(ti->table, m->queue_mode); | 223 | dm_table_set_type(ti->table, m->queue_mode); |
236 | 224 | ||
225 | /* | ||
226 | * Init fields that are only used when a scsi_dh is attached | ||
227 | * - must do this unconditionally (really doesn't hurt non-SCSI uses) | ||
228 | */ | ||
229 | set_bit(MPATHF_QUEUE_IO, &m->flags); | ||
230 | atomic_set(&m->pg_init_in_progress, 0); | ||
231 | atomic_set(&m->pg_init_count, 0); | ||
232 | m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT; | ||
233 | init_waitqueue_head(&m->pg_init_wait); | ||
234 | |||
237 | return 0; | 235 | return 0; |
238 | } | 236 | } |
239 | 237 | ||
@@ -337,9 +335,6 @@ static void __switch_pg(struct multipath *m, struct priority_group *pg) | |||
337 | { | 335 | { |
338 | m->current_pg = pg; | 336 | m->current_pg = pg; |
339 | 337 | ||
340 | if (m->queue_mode == DM_TYPE_NVME_BIO_BASED) | ||
341 | return; | ||
342 | |||
343 | /* Must we initialise the PG first, and queue I/O till it's ready? */ | 338 | /* Must we initialise the PG first, and queue I/O till it's ready? */ |
344 | if (m->hw_handler_name) { | 339 | if (m->hw_handler_name) { |
345 | set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags); | 340 | set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags); |
@@ -385,8 +380,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes) | |||
385 | unsigned bypassed = 1; | 380 | unsigned bypassed = 1; |
386 | 381 | ||
387 | if (!atomic_read(&m->nr_valid_paths)) { | 382 | if (!atomic_read(&m->nr_valid_paths)) { |
388 | if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) | 383 | clear_bit(MPATHF_QUEUE_IO, &m->flags); |
389 | clear_bit(MPATHF_QUEUE_IO, &m->flags); | ||
390 | goto failed; | 384 | goto failed; |
391 | } | 385 | } |
392 | 386 | ||
@@ -599,7 +593,7 @@ static struct pgpath *__map_bio(struct multipath *m, struct bio *bio) | |||
599 | return pgpath; | 593 | return pgpath; |
600 | } | 594 | } |
601 | 595 | ||
602 | static struct pgpath *__map_bio_nvme(struct multipath *m, struct bio *bio) | 596 | static struct pgpath *__map_bio_fast(struct multipath *m, struct bio *bio) |
603 | { | 597 | { |
604 | struct pgpath *pgpath; | 598 | struct pgpath *pgpath; |
605 | unsigned long flags; | 599 | unsigned long flags; |
@@ -634,8 +628,8 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, | |||
634 | { | 628 | { |
635 | struct pgpath *pgpath; | 629 | struct pgpath *pgpath; |
636 | 630 | ||
637 | if (m->queue_mode == DM_TYPE_NVME_BIO_BASED) | 631 | if (!m->hw_handler_name) |
638 | pgpath = __map_bio_nvme(m, bio); | 632 | pgpath = __map_bio_fast(m, bio); |
639 | else | 633 | else |
640 | pgpath = __map_bio(m, bio); | 634 | pgpath = __map_bio(m, bio); |
641 | 635 | ||
@@ -675,8 +669,7 @@ static void process_queued_io_list(struct multipath *m) | |||
675 | { | 669 | { |
676 | if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED) | 670 | if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED) |
677 | dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table)); | 671 | dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table)); |
678 | else if (m->queue_mode == DM_TYPE_BIO_BASED || | 672 | else if (m->queue_mode == DM_TYPE_BIO_BASED) |
679 | m->queue_mode == DM_TYPE_NVME_BIO_BASED) | ||
680 | queue_work(kmultipathd, &m->process_queued_bios); | 673 | queue_work(kmultipathd, &m->process_queued_bios); |
681 | } | 674 | } |
682 | 675 | ||
@@ -811,15 +804,14 @@ static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg, | |||
811 | return 0; | 804 | return 0; |
812 | } | 805 | } |
813 | 806 | ||
814 | static int setup_scsi_dh(struct block_device *bdev, struct multipath *m, char **error) | 807 | static int setup_scsi_dh(struct block_device *bdev, struct multipath *m, |
808 | const char *attached_handler_name, char **error) | ||
815 | { | 809 | { |
816 | struct request_queue *q = bdev_get_queue(bdev); | 810 | struct request_queue *q = bdev_get_queue(bdev); |
817 | const char *attached_handler_name; | ||
818 | int r; | 811 | int r; |
819 | 812 | ||
820 | if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) { | 813 | if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) { |
821 | retain: | 814 | retain: |
822 | attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL); | ||
823 | if (attached_handler_name) { | 815 | if (attached_handler_name) { |
824 | /* | 816 | /* |
825 | * Clear any hw_handler_params associated with a | 817 | * Clear any hw_handler_params associated with a |
@@ -873,6 +865,8 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps | |||
873 | int r; | 865 | int r; |
874 | struct pgpath *p; | 866 | struct pgpath *p; |
875 | struct multipath *m = ti->private; | 867 | struct multipath *m = ti->private; |
868 | struct request_queue *q; | ||
869 | const char *attached_handler_name; | ||
876 | 870 | ||
877 | /* we need at least a path arg */ | 871 | /* we need at least a path arg */ |
878 | if (as->argc < 1) { | 872 | if (as->argc < 1) { |
@@ -891,9 +885,11 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps | |||
891 | goto bad; | 885 | goto bad; |
892 | } | 886 | } |
893 | 887 | ||
894 | if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) { | 888 | q = bdev_get_queue(p->path.dev->bdev); |
889 | attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL); | ||
890 | if (attached_handler_name) { | ||
895 | INIT_DELAYED_WORK(&p->activate_path, activate_path_work); | 891 | INIT_DELAYED_WORK(&p->activate_path, activate_path_work); |
896 | r = setup_scsi_dh(p->path.dev->bdev, m, &ti->error); | 892 | r = setup_scsi_dh(p->path.dev->bdev, m, attached_handler_name, &ti->error); |
897 | if (r) { | 893 | if (r) { |
898 | dm_put_device(ti, p->path.dev); | 894 | dm_put_device(ti, p->path.dev); |
899 | goto bad; | 895 | goto bad; |
@@ -1001,8 +997,7 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m) | |||
1001 | if (!hw_argc) | 997 | if (!hw_argc) |
1002 | return 0; | 998 | return 0; |
1003 | 999 | ||
1004 | if (m->queue_mode == DM_TYPE_BIO_BASED || | 1000 | if (m->queue_mode == DM_TYPE_BIO_BASED) { |
1005 | m->queue_mode == DM_TYPE_NVME_BIO_BASED) { | ||
1006 | dm_consume_args(as, hw_argc); | 1001 | dm_consume_args(as, hw_argc); |
1007 | DMERR("bio-based multipath doesn't allow hardware handler args"); | 1002 | DMERR("bio-based multipath doesn't allow hardware handler args"); |
1008 | return 0; | 1003 | return 0; |
@@ -1091,8 +1086,6 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m) | |||
1091 | 1086 | ||
1092 | if (!strcasecmp(queue_mode_name, "bio")) | 1087 | if (!strcasecmp(queue_mode_name, "bio")) |
1093 | m->queue_mode = DM_TYPE_BIO_BASED; | 1088 | m->queue_mode = DM_TYPE_BIO_BASED; |
1094 | else if (!strcasecmp(queue_mode_name, "nvme")) | ||
1095 | m->queue_mode = DM_TYPE_NVME_BIO_BASED; | ||
1096 | else if (!strcasecmp(queue_mode_name, "rq")) | 1089 | else if (!strcasecmp(queue_mode_name, "rq")) |
1097 | m->queue_mode = DM_TYPE_REQUEST_BASED; | 1090 | m->queue_mode = DM_TYPE_REQUEST_BASED; |
1098 | else if (!strcasecmp(queue_mode_name, "mq")) | 1091 | else if (!strcasecmp(queue_mode_name, "mq")) |
@@ -1193,7 +1186,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv) | |||
1193 | ti->num_discard_bios = 1; | 1186 | ti->num_discard_bios = 1; |
1194 | ti->num_write_same_bios = 1; | 1187 | ti->num_write_same_bios = 1; |
1195 | ti->num_write_zeroes_bios = 1; | 1188 | ti->num_write_zeroes_bios = 1; |
1196 | if (m->queue_mode == DM_TYPE_BIO_BASED || m->queue_mode == DM_TYPE_NVME_BIO_BASED) | 1189 | if (m->queue_mode == DM_TYPE_BIO_BASED) |
1197 | ti->per_io_data_size = multipath_per_bio_data_size(); | 1190 | ti->per_io_data_size = multipath_per_bio_data_size(); |
1198 | else | 1191 | else |
1199 | ti->per_io_data_size = sizeof(struct dm_mpath_io); | 1192 | ti->per_io_data_size = sizeof(struct dm_mpath_io); |
@@ -1730,9 +1723,6 @@ static void multipath_status(struct dm_target *ti, status_type_t type, | |||
1730 | case DM_TYPE_BIO_BASED: | 1723 | case DM_TYPE_BIO_BASED: |
1731 | DMEMIT("queue_mode bio "); | 1724 | DMEMIT("queue_mode bio "); |
1732 | break; | 1725 | break; |
1733 | case DM_TYPE_NVME_BIO_BASED: | ||
1734 | DMEMIT("queue_mode nvme "); | ||
1735 | break; | ||
1736 | case DM_TYPE_MQ_REQUEST_BASED: | 1726 | case DM_TYPE_MQ_REQUEST_BASED: |
1737 | DMEMIT("queue_mode mq "); | 1727 | DMEMIT("queue_mode mq "); |
1738 | break; | 1728 | break; |
@@ -2030,8 +2020,9 @@ static int multipath_busy(struct dm_target *ti) | |||
2030 | *---------------------------------------------------------------*/ | 2020 | *---------------------------------------------------------------*/ |
2031 | static struct target_type multipath_target = { | 2021 | static struct target_type multipath_target = { |
2032 | .name = "multipath", | 2022 | .name = "multipath", |
2033 | .version = {1, 12, 0}, | 2023 | .version = {1, 13, 0}, |
2034 | .features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE, | 2024 | .features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE | |
2025 | DM_TARGET_PASSES_INTEGRITY, | ||
2035 | .module = THIS_MODULE, | 2026 | .module = THIS_MODULE, |
2036 | .ctr = multipath_ctr, | 2027 | .ctr = multipath_ctr, |
2037 | .dtr = multipath_dtr, | 2028 | .dtr = multipath_dtr, |
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 7ef469e902c6..c1d1034ff7b7 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c | |||
@@ -3408,9 +3408,10 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery, | |||
3408 | set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); | 3408 | set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); |
3409 | 3409 | ||
3410 | } else { | 3410 | } else { |
3411 | if (test_bit(MD_RECOVERY_NEEDED, &recovery) || | 3411 | if (!test_bit(MD_RECOVERY_INTR, &recovery) && |
3412 | test_bit(MD_RECOVERY_RESHAPE, &recovery) || | 3412 | (test_bit(MD_RECOVERY_NEEDED, &recovery) || |
3413 | test_bit(MD_RECOVERY_RUNNING, &recovery)) | 3413 | test_bit(MD_RECOVERY_RESHAPE, &recovery) || |
3414 | test_bit(MD_RECOVERY_RUNNING, &recovery))) | ||
3414 | r = mddev->curr_resync_completed; | 3415 | r = mddev->curr_resync_completed; |
3415 | else | 3416 | else |
3416 | r = mddev->recovery_cp; | 3417 | r = mddev->recovery_cp; |
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 5fe7ec356c33..7eb3e2a3c07d 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c | |||
@@ -942,17 +942,12 @@ static int dm_table_determine_type(struct dm_table *t) | |||
942 | 942 | ||
943 | if (t->type != DM_TYPE_NONE) { | 943 | if (t->type != DM_TYPE_NONE) { |
944 | /* target already set the table's type */ | 944 | /* target already set the table's type */ |
945 | if (t->type == DM_TYPE_BIO_BASED) | 945 | if (t->type == DM_TYPE_BIO_BASED) { |
946 | return 0; | 946 | /* possibly upgrade to a variant of bio-based */ |
947 | else if (t->type == DM_TYPE_NVME_BIO_BASED) { | 947 | goto verify_bio_based; |
948 | if (!dm_table_does_not_support_partial_completion(t)) { | ||
949 | DMERR("nvme bio-based is only possible with devices" | ||
950 | " that don't support partial completion"); | ||
951 | return -EINVAL; | ||
952 | } | ||
953 | /* Fallthru, also verify all devices are blk-mq */ | ||
954 | } | 948 | } |
955 | BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED); | 949 | BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED); |
950 | BUG_ON(t->type == DM_TYPE_NVME_BIO_BASED); | ||
956 | goto verify_rq_based; | 951 | goto verify_rq_based; |
957 | } | 952 | } |
958 | 953 | ||
@@ -985,6 +980,7 @@ static int dm_table_determine_type(struct dm_table *t) | |||
985 | } | 980 | } |
986 | 981 | ||
987 | if (bio_based) { | 982 | if (bio_based) { |
983 | verify_bio_based: | ||
988 | /* We must use this table as bio-based */ | 984 | /* We must use this table as bio-based */ |
989 | t->type = DM_TYPE_BIO_BASED; | 985 | t->type = DM_TYPE_BIO_BASED; |
990 | if (dm_table_supports_dax(t) || | 986 | if (dm_table_supports_dax(t) || |
@@ -1755,7 +1751,7 @@ static int device_no_partial_completion(struct dm_target *ti, struct dm_dev *dev | |||
1755 | char b[BDEVNAME_SIZE]; | 1751 | char b[BDEVNAME_SIZE]; |
1756 | 1752 | ||
1757 | /* For now, NVMe devices are the only devices of this class */ | 1753 | /* For now, NVMe devices are the only devices of this class */ |
1758 | return (strncmp(bdevname(dev->bdev, b), "nvme", 3) == 0); | 1754 | return (strncmp(bdevname(dev->bdev, b), "nvme", 4) == 0); |
1759 | } | 1755 | } |
1760 | 1756 | ||
1761 | static bool dm_table_does_not_support_partial_completion(struct dm_table *t) | 1757 | static bool dm_table_does_not_support_partial_completion(struct dm_table *t) |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 68136806d365..45328d8b2859 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -458,9 +458,11 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) | |||
458 | return dm_get_geometry(md, geo); | 458 | return dm_get_geometry(md, geo); |
459 | } | 459 | } |
460 | 460 | ||
461 | static int dm_grab_bdev_for_ioctl(struct mapped_device *md, | 461 | static char *_dm_claim_ptr = "I belong to device-mapper"; |
462 | struct block_device **bdev, | 462 | |
463 | fmode_t *mode) | 463 | static int dm_get_bdev_for_ioctl(struct mapped_device *md, |
464 | struct block_device **bdev, | ||
465 | fmode_t *mode) | ||
464 | { | 466 | { |
465 | struct dm_target *tgt; | 467 | struct dm_target *tgt; |
466 | struct dm_table *map; | 468 | struct dm_table *map; |
@@ -490,6 +492,10 @@ retry: | |||
490 | goto out; | 492 | goto out; |
491 | 493 | ||
492 | bdgrab(*bdev); | 494 | bdgrab(*bdev); |
495 | r = blkdev_get(*bdev, *mode, _dm_claim_ptr); | ||
496 | if (r < 0) | ||
497 | goto out; | ||
498 | |||
493 | dm_put_live_table(md, srcu_idx); | 499 | dm_put_live_table(md, srcu_idx); |
494 | return r; | 500 | return r; |
495 | 501 | ||
@@ -508,7 +514,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, | |||
508 | struct mapped_device *md = bdev->bd_disk->private_data; | 514 | struct mapped_device *md = bdev->bd_disk->private_data; |
509 | int r; | 515 | int r; |
510 | 516 | ||
511 | r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); | 517 | r = dm_get_bdev_for_ioctl(md, &bdev, &mode); |
512 | if (r < 0) | 518 | if (r < 0) |
513 | return r; | 519 | return r; |
514 | 520 | ||
@@ -528,7 +534,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, | |||
528 | 534 | ||
529 | r = __blkdev_driver_ioctl(bdev, mode, cmd, arg); | 535 | r = __blkdev_driver_ioctl(bdev, mode, cmd, arg); |
530 | out: | 536 | out: |
531 | bdput(bdev); | 537 | blkdev_put(bdev, mode); |
532 | return r; | 538 | return r; |
533 | } | 539 | } |
534 | 540 | ||
@@ -708,14 +714,13 @@ static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU) | |||
708 | static int open_table_device(struct table_device *td, dev_t dev, | 714 | static int open_table_device(struct table_device *td, dev_t dev, |
709 | struct mapped_device *md) | 715 | struct mapped_device *md) |
710 | { | 716 | { |
711 | static char *_claim_ptr = "I belong to device-mapper"; | ||
712 | struct block_device *bdev; | 717 | struct block_device *bdev; |
713 | 718 | ||
714 | int r; | 719 | int r; |
715 | 720 | ||
716 | BUG_ON(td->dm_dev.bdev); | 721 | BUG_ON(td->dm_dev.bdev); |
717 | 722 | ||
718 | bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr); | 723 | bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr); |
719 | if (IS_ERR(bdev)) | 724 | if (IS_ERR(bdev)) |
720 | return PTR_ERR(bdev); | 725 | return PTR_ERR(bdev); |
721 | 726 | ||
@@ -3011,7 +3016,7 @@ static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, | |||
3011 | fmode_t mode; | 3016 | fmode_t mode; |
3012 | int r; | 3017 | int r; |
3013 | 3018 | ||
3014 | r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); | 3019 | r = dm_get_bdev_for_ioctl(md, &bdev, &mode); |
3015 | if (r < 0) | 3020 | if (r < 0) |
3016 | return r; | 3021 | return r; |
3017 | 3022 | ||
@@ -3021,7 +3026,7 @@ static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, | |||
3021 | else | 3026 | else |
3022 | r = -EOPNOTSUPP; | 3027 | r = -EOPNOTSUPP; |
3023 | 3028 | ||
3024 | bdput(bdev); | 3029 | blkdev_put(bdev, mode); |
3025 | return r; | 3030 | return r; |
3026 | } | 3031 | } |
3027 | 3032 | ||
@@ -3032,7 +3037,7 @@ static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type) | |||
3032 | fmode_t mode; | 3037 | fmode_t mode; |
3033 | int r; | 3038 | int r; |
3034 | 3039 | ||
3035 | r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); | 3040 | r = dm_get_bdev_for_ioctl(md, &bdev, &mode); |
3036 | if (r < 0) | 3041 | if (r < 0) |
3037 | return r; | 3042 | return r; |
3038 | 3043 | ||
@@ -3042,7 +3047,7 @@ static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type) | |||
3042 | else | 3047 | else |
3043 | r = -EOPNOTSUPP; | 3048 | r = -EOPNOTSUPP; |
3044 | 3049 | ||
3045 | bdput(bdev); | 3050 | blkdev_put(bdev, mode); |
3046 | return r; | 3051 | return r; |
3047 | } | 3052 | } |
3048 | 3053 | ||
@@ -3054,7 +3059,7 @@ static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key, | |||
3054 | fmode_t mode; | 3059 | fmode_t mode; |
3055 | int r; | 3060 | int r; |
3056 | 3061 | ||
3057 | r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); | 3062 | r = dm_get_bdev_for_ioctl(md, &bdev, &mode); |
3058 | if (r < 0) | 3063 | if (r < 0) |
3059 | return r; | 3064 | return r; |
3060 | 3065 | ||
@@ -3064,7 +3069,7 @@ static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key, | |||
3064 | else | 3069 | else |
3065 | r = -EOPNOTSUPP; | 3070 | r = -EOPNOTSUPP; |
3066 | 3071 | ||
3067 | bdput(bdev); | 3072 | blkdev_put(bdev, mode); |
3068 | return r; | 3073 | return r; |
3069 | } | 3074 | } |
3070 | 3075 | ||
@@ -3075,7 +3080,7 @@ static int dm_pr_clear(struct block_device *bdev, u64 key) | |||
3075 | fmode_t mode; | 3080 | fmode_t mode; |
3076 | int r; | 3081 | int r; |
3077 | 3082 | ||
3078 | r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); | 3083 | r = dm_get_bdev_for_ioctl(md, &bdev, &mode); |
3079 | if (r < 0) | 3084 | if (r < 0) |
3080 | return r; | 3085 | return r; |
3081 | 3086 | ||
@@ -3085,7 +3090,7 @@ static int dm_pr_clear(struct block_device *bdev, u64 key) | |||
3085 | else | 3090 | else |
3086 | r = -EOPNOTSUPP; | 3091 | r = -EOPNOTSUPP; |
3087 | 3092 | ||
3088 | bdput(bdev); | 3093 | blkdev_put(bdev, mode); |
3089 | return r; | 3094 | return r; |
3090 | } | 3095 | } |
3091 | 3096 | ||
diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c index e40065bdbfc8..0a7e99d62c69 100644 --- a/drivers/md/md-multipath.c +++ b/drivers/md/md-multipath.c | |||
@@ -157,7 +157,7 @@ static void multipath_status(struct seq_file *seq, struct mddev *mddev) | |||
157 | seq_printf (seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_"); | 157 | seq_printf (seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_"); |
158 | } | 158 | } |
159 | rcu_read_unlock(); | 159 | rcu_read_unlock(); |
160 | seq_printf (seq, "]"); | 160 | seq_putc(seq, ']'); |
161 | } | 161 | } |
162 | 162 | ||
163 | static int multipath_congested(struct mddev *mddev, int bits) | 163 | static int multipath_congested(struct mddev *mddev, int bits) |
diff --git a/drivers/md/md.c b/drivers/md/md.c index bc67ab6844f0..254e44e44668 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -801,6 +801,9 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev, | |||
801 | struct bio *bio; | 801 | struct bio *bio; |
802 | int ff = 0; | 802 | int ff = 0; |
803 | 803 | ||
804 | if (!page) | ||
805 | return; | ||
806 | |||
804 | if (test_bit(Faulty, &rdev->flags)) | 807 | if (test_bit(Faulty, &rdev->flags)) |
805 | return; | 808 | return; |
806 | 809 | ||
@@ -5452,6 +5455,7 @@ int md_run(struct mddev *mddev) | |||
5452 | * the only valid external interface is through the md | 5455 | * the only valid external interface is through the md |
5453 | * device. | 5456 | * device. |
5454 | */ | 5457 | */ |
5458 | mddev->has_superblocks = false; | ||
5455 | rdev_for_each(rdev, mddev) { | 5459 | rdev_for_each(rdev, mddev) { |
5456 | if (test_bit(Faulty, &rdev->flags)) | 5460 | if (test_bit(Faulty, &rdev->flags)) |
5457 | continue; | 5461 | continue; |
@@ -5465,6 +5469,9 @@ int md_run(struct mddev *mddev) | |||
5465 | set_disk_ro(mddev->gendisk, 1); | 5469 | set_disk_ro(mddev->gendisk, 1); |
5466 | } | 5470 | } |
5467 | 5471 | ||
5472 | if (rdev->sb_page) | ||
5473 | mddev->has_superblocks = true; | ||
5474 | |||
5468 | /* perform some consistency tests on the device. | 5475 | /* perform some consistency tests on the device. |
5469 | * We don't want the data to overlap the metadata, | 5476 | * We don't want the data to overlap the metadata, |
5470 | * Internal Bitmap issues have been handled elsewhere. | 5477 | * Internal Bitmap issues have been handled elsewhere. |
@@ -5497,8 +5504,10 @@ int md_run(struct mddev *mddev) | |||
5497 | } | 5504 | } |
5498 | if (mddev->sync_set == NULL) { | 5505 | if (mddev->sync_set == NULL) { |
5499 | mddev->sync_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); | 5506 | mddev->sync_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); |
5500 | if (!mddev->sync_set) | 5507 | if (!mddev->sync_set) { |
5501 | return -ENOMEM; | 5508 | err = -ENOMEM; |
5509 | goto abort; | ||
5510 | } | ||
5502 | } | 5511 | } |
5503 | 5512 | ||
5504 | spin_lock(&pers_lock); | 5513 | spin_lock(&pers_lock); |
@@ -5511,7 +5520,8 @@ int md_run(struct mddev *mddev) | |||
5511 | else | 5520 | else |
5512 | pr_warn("md: personality for level %s is not loaded!\n", | 5521 | pr_warn("md: personality for level %s is not loaded!\n", |
5513 | mddev->clevel); | 5522 | mddev->clevel); |
5514 | return -EINVAL; | 5523 | err = -EINVAL; |
5524 | goto abort; | ||
5515 | } | 5525 | } |
5516 | spin_unlock(&pers_lock); | 5526 | spin_unlock(&pers_lock); |
5517 | if (mddev->level != pers->level) { | 5527 | if (mddev->level != pers->level) { |
@@ -5524,7 +5534,8 @@ int md_run(struct mddev *mddev) | |||
5524 | pers->start_reshape == NULL) { | 5534 | pers->start_reshape == NULL) { |
5525 | /* This personality cannot handle reshaping... */ | 5535 | /* This personality cannot handle reshaping... */ |
5526 | module_put(pers->owner); | 5536 | module_put(pers->owner); |
5527 | return -EINVAL; | 5537 | err = -EINVAL; |
5538 | goto abort; | ||
5528 | } | 5539 | } |
5529 | 5540 | ||
5530 | if (pers->sync_request) { | 5541 | if (pers->sync_request) { |
@@ -5593,7 +5604,7 @@ int md_run(struct mddev *mddev) | |||
5593 | mddev->private = NULL; | 5604 | mddev->private = NULL; |
5594 | module_put(pers->owner); | 5605 | module_put(pers->owner); |
5595 | bitmap_destroy(mddev); | 5606 | bitmap_destroy(mddev); |
5596 | return err; | 5607 | goto abort; |
5597 | } | 5608 | } |
5598 | if (mddev->queue) { | 5609 | if (mddev->queue) { |
5599 | bool nonrot = true; | 5610 | bool nonrot = true; |
@@ -5655,6 +5666,18 @@ int md_run(struct mddev *mddev) | |||
5655 | sysfs_notify_dirent_safe(mddev->sysfs_action); | 5666 | sysfs_notify_dirent_safe(mddev->sysfs_action); |
5656 | sysfs_notify(&mddev->kobj, NULL, "degraded"); | 5667 | sysfs_notify(&mddev->kobj, NULL, "degraded"); |
5657 | return 0; | 5668 | return 0; |
5669 | |||
5670 | abort: | ||
5671 | if (mddev->bio_set) { | ||
5672 | bioset_free(mddev->bio_set); | ||
5673 | mddev->bio_set = NULL; | ||
5674 | } | ||
5675 | if (mddev->sync_set) { | ||
5676 | bioset_free(mddev->sync_set); | ||
5677 | mddev->sync_set = NULL; | ||
5678 | } | ||
5679 | |||
5680 | return err; | ||
5658 | } | 5681 | } |
5659 | EXPORT_SYMBOL_GPL(md_run); | 5682 | EXPORT_SYMBOL_GPL(md_run); |
5660 | 5683 | ||
@@ -8049,6 +8072,7 @@ EXPORT_SYMBOL(md_done_sync); | |||
8049 | bool md_write_start(struct mddev *mddev, struct bio *bi) | 8072 | bool md_write_start(struct mddev *mddev, struct bio *bi) |
8050 | { | 8073 | { |
8051 | int did_change = 0; | 8074 | int did_change = 0; |
8075 | |||
8052 | if (bio_data_dir(bi) != WRITE) | 8076 | if (bio_data_dir(bi) != WRITE) |
8053 | return true; | 8077 | return true; |
8054 | 8078 | ||
@@ -8081,6 +8105,8 @@ bool md_write_start(struct mddev *mddev, struct bio *bi) | |||
8081 | rcu_read_unlock(); | 8105 | rcu_read_unlock(); |
8082 | if (did_change) | 8106 | if (did_change) |
8083 | sysfs_notify_dirent_safe(mddev->sysfs_state); | 8107 | sysfs_notify_dirent_safe(mddev->sysfs_state); |
8108 | if (!mddev->has_superblocks) | ||
8109 | return true; | ||
8084 | wait_event(mddev->sb_wait, | 8110 | wait_event(mddev->sb_wait, |
8085 | !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) || | 8111 | !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) || |
8086 | mddev->suspended); | 8112 | mddev->suspended); |
@@ -8543,6 +8569,19 @@ void md_do_sync(struct md_thread *thread) | |||
8543 | set_mask_bits(&mddev->sb_flags, 0, | 8569 | set_mask_bits(&mddev->sb_flags, 0, |
8544 | BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS)); | 8570 | BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS)); |
8545 | 8571 | ||
8572 | if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && | ||
8573 | !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && | ||
8574 | mddev->delta_disks > 0 && | ||
8575 | mddev->pers->finish_reshape && | ||
8576 | mddev->pers->size && | ||
8577 | mddev->queue) { | ||
8578 | mddev_lock_nointr(mddev); | ||
8579 | md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0)); | ||
8580 | mddev_unlock(mddev); | ||
8581 | set_capacity(mddev->gendisk, mddev->array_sectors); | ||
8582 | revalidate_disk(mddev->gendisk); | ||
8583 | } | ||
8584 | |||
8546 | spin_lock(&mddev->lock); | 8585 | spin_lock(&mddev->lock); |
8547 | if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { | 8586 | if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { |
8548 | /* We completed so min/max setting can be forgotten if used. */ | 8587 | /* We completed so min/max setting can be forgotten if used. */ |
@@ -8569,6 +8608,10 @@ static int remove_and_add_spares(struct mddev *mddev, | |||
8569 | int removed = 0; | 8608 | int removed = 0; |
8570 | bool remove_some = false; | 8609 | bool remove_some = false; |
8571 | 8610 | ||
8611 | if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) | ||
8612 | /* Mustn't remove devices when resync thread is running */ | ||
8613 | return 0; | ||
8614 | |||
8572 | rdev_for_each(rdev, mddev) { | 8615 | rdev_for_each(rdev, mddev) { |
8573 | if ((this == NULL || rdev == this) && | 8616 | if ((this == NULL || rdev == this) && |
8574 | rdev->raid_disk >= 0 && | 8617 | rdev->raid_disk >= 0 && |
diff --git a/drivers/md/md.h b/drivers/md/md.h index 58cd20a5e85e..fbc925cce810 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h | |||
@@ -468,6 +468,8 @@ struct mddev { | |||
468 | void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev); | 468 | void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev); |
469 | struct md_cluster_info *cluster_info; | 469 | struct md_cluster_info *cluster_info; |
470 | unsigned int good_device_nr; /* good device num within cluster raid */ | 470 | unsigned int good_device_nr; /* good device num within cluster raid */ |
471 | |||
472 | bool has_superblocks:1; | ||
471 | }; | 473 | }; |
472 | 474 | ||
473 | enum recovery_flags { | 475 | enum recovery_flags { |
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index f978eddc7a21..fe872dc6712e 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -1809,6 +1809,17 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev) | |||
1809 | struct md_rdev *repl = | 1809 | struct md_rdev *repl = |
1810 | conf->mirrors[conf->raid_disks + number].rdev; | 1810 | conf->mirrors[conf->raid_disks + number].rdev; |
1811 | freeze_array(conf, 0); | 1811 | freeze_array(conf, 0); |
1812 | if (atomic_read(&repl->nr_pending)) { | ||
1813 | /* It means that some queued IO of retry_list | ||
1814 | * hold repl. Thus, we cannot set replacement | ||
1815 | * as NULL, avoiding rdev NULL pointer | ||
1816 | * dereference in sync_request_write and | ||
1817 | * handle_write_finished. | ||
1818 | */ | ||
1819 | err = -EBUSY; | ||
1820 | unfreeze_array(conf); | ||
1821 | goto abort; | ||
1822 | } | ||
1812 | clear_bit(Replacement, &repl->flags); | 1823 | clear_bit(Replacement, &repl->flags); |
1813 | p->rdev = repl; | 1824 | p->rdev = repl; |
1814 | conf->mirrors[conf->raid_disks + number].rdev = NULL; | 1825 | conf->mirrors[conf->raid_disks + number].rdev = NULL; |
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h index c7294e7557e0..eb84bc68e2fd 100644 --- a/drivers/md/raid1.h +++ b/drivers/md/raid1.h | |||
@@ -26,6 +26,18 @@ | |||
26 | #define BARRIER_BUCKETS_NR_BITS (PAGE_SHIFT - ilog2(sizeof(atomic_t))) | 26 | #define BARRIER_BUCKETS_NR_BITS (PAGE_SHIFT - ilog2(sizeof(atomic_t))) |
27 | #define BARRIER_BUCKETS_NR (1<<BARRIER_BUCKETS_NR_BITS) | 27 | #define BARRIER_BUCKETS_NR (1<<BARRIER_BUCKETS_NR_BITS) |
28 | 28 | ||
29 | /* Note: raid1_info.rdev can be set to NULL asynchronously by raid1_remove_disk. | ||
30 | * There are three safe ways to access raid1_info.rdev. | ||
31 | * 1/ when holding mddev->reconfig_mutex | ||
32 | * 2/ when resync/recovery is known to be happening - i.e. in code that is | ||
33 | * called as part of performing resync/recovery. | ||
34 | * 3/ while holding rcu_read_lock(), use rcu_dereference to get the pointer | ||
35 | * and if it is non-NULL, increment rdev->nr_pending before dropping the | ||
36 | * RCU lock. | ||
37 | * When .rdev is set to NULL, the nr_pending count checked again and if it has | ||
38 | * been incremented, the pointer is put back in .rdev. | ||
39 | */ | ||
40 | |||
29 | struct raid1_info { | 41 | struct raid1_info { |
30 | struct md_rdev *rdev; | 42 | struct md_rdev *rdev; |
31 | sector_t head_position; | 43 | sector_t head_position; |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 99c9207899a7..c5e6c60fc0d4 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -141,7 +141,7 @@ static void r10bio_pool_free(void *r10_bio, void *data) | |||
141 | #define RESYNC_WINDOW (1024*1024) | 141 | #define RESYNC_WINDOW (1024*1024) |
142 | /* maximum number of concurrent requests, memory permitting */ | 142 | /* maximum number of concurrent requests, memory permitting */ |
143 | #define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE) | 143 | #define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE) |
144 | #define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW) | 144 | #define CLUSTER_RESYNC_WINDOW (32 * RESYNC_WINDOW) |
145 | #define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9) | 145 | #define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9) |
146 | 146 | ||
147 | /* | 147 | /* |
@@ -2655,7 +2655,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) | |||
2655 | for (m = 0; m < conf->copies; m++) { | 2655 | for (m = 0; m < conf->copies; m++) { |
2656 | int dev = r10_bio->devs[m].devnum; | 2656 | int dev = r10_bio->devs[m].devnum; |
2657 | rdev = conf->mirrors[dev].rdev; | 2657 | rdev = conf->mirrors[dev].rdev; |
2658 | if (r10_bio->devs[m].bio == NULL) | 2658 | if (r10_bio->devs[m].bio == NULL || |
2659 | r10_bio->devs[m].bio->bi_end_io == NULL) | ||
2659 | continue; | 2660 | continue; |
2660 | if (!r10_bio->devs[m].bio->bi_status) { | 2661 | if (!r10_bio->devs[m].bio->bi_status) { |
2661 | rdev_clear_badblocks( | 2662 | rdev_clear_badblocks( |
@@ -2670,7 +2671,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) | |||
2670 | md_error(conf->mddev, rdev); | 2671 | md_error(conf->mddev, rdev); |
2671 | } | 2672 | } |
2672 | rdev = conf->mirrors[dev].replacement; | 2673 | rdev = conf->mirrors[dev].replacement; |
2673 | if (r10_bio->devs[m].repl_bio == NULL) | 2674 | if (r10_bio->devs[m].repl_bio == NULL || |
2675 | r10_bio->devs[m].repl_bio->bi_end_io == NULL) | ||
2674 | continue; | 2676 | continue; |
2675 | 2677 | ||
2676 | if (!r10_bio->devs[m].repl_bio->bi_status) { | 2678 | if (!r10_bio->devs[m].repl_bio->bi_status) { |
@@ -3782,7 +3784,7 @@ static int raid10_run(struct mddev *mddev) | |||
3782 | if (fc > 1 || fo > 0) { | 3784 | if (fc > 1 || fo > 0) { |
3783 | pr_err("only near layout is supported by clustered" | 3785 | pr_err("only near layout is supported by clustered" |
3784 | " raid10\n"); | 3786 | " raid10\n"); |
3785 | goto out; | 3787 | goto out_free_conf; |
3786 | } | 3788 | } |
3787 | } | 3789 | } |
3788 | 3790 | ||
@@ -4830,17 +4832,11 @@ static void raid10_finish_reshape(struct mddev *mddev) | |||
4830 | return; | 4832 | return; |
4831 | 4833 | ||
4832 | if (mddev->delta_disks > 0) { | 4834 | if (mddev->delta_disks > 0) { |
4833 | sector_t size = raid10_size(mddev, 0, 0); | ||
4834 | md_set_array_sectors(mddev, size); | ||
4835 | if (mddev->recovery_cp > mddev->resync_max_sectors) { | 4835 | if (mddev->recovery_cp > mddev->resync_max_sectors) { |
4836 | mddev->recovery_cp = mddev->resync_max_sectors; | 4836 | mddev->recovery_cp = mddev->resync_max_sectors; |
4837 | set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); | 4837 | set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); |
4838 | } | 4838 | } |
4839 | mddev->resync_max_sectors = size; | 4839 | mddev->resync_max_sectors = mddev->array_sectors; |
4840 | if (mddev->queue) { | ||
4841 | set_capacity(mddev->gendisk, mddev->array_sectors); | ||
4842 | revalidate_disk(mddev->gendisk); | ||
4843 | } | ||
4844 | } else { | 4840 | } else { |
4845 | int d; | 4841 | int d; |
4846 | rcu_read_lock(); | 4842 | rcu_read_lock(); |
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h index db2ac22ac1b4..e2e8840de9bf 100644 --- a/drivers/md/raid10.h +++ b/drivers/md/raid10.h | |||
@@ -2,6 +2,19 @@ | |||
2 | #ifndef _RAID10_H | 2 | #ifndef _RAID10_H |
3 | #define _RAID10_H | 3 | #define _RAID10_H |
4 | 4 | ||
5 | /* Note: raid10_info.rdev can be set to NULL asynchronously by | ||
6 | * raid10_remove_disk. | ||
7 | * There are three safe ways to access raid10_info.rdev. | ||
8 | * 1/ when holding mddev->reconfig_mutex | ||
9 | * 2/ when resync/recovery/reshape is known to be happening - i.e. in code | ||
10 | * that is called as part of performing resync/recovery/reshape. | ||
11 | * 3/ while holding rcu_read_lock(), use rcu_dereference to get the pointer | ||
12 | * and if it is non-NULL, increment rdev->nr_pending before dropping the | ||
13 | * RCU lock. | ||
14 | * When .rdev is set to NULL, the nr_pending count checked again and if it has | ||
15 | * been incremented, the pointer is put back in .rdev. | ||
16 | */ | ||
17 | |||
5 | struct raid10_info { | 18 | struct raid10_info { |
6 | struct md_rdev *rdev, *replacement; | 19 | struct md_rdev *rdev, *replacement; |
7 | sector_t head_position; | 20 | sector_t head_position; |
diff --git a/drivers/md/raid5-log.h b/drivers/md/raid5-log.h index 0c76bcedfc1c..a001808a2b77 100644 --- a/drivers/md/raid5-log.h +++ b/drivers/md/raid5-log.h | |||
@@ -44,6 +44,7 @@ extern void ppl_write_stripe_run(struct r5conf *conf); | |||
44 | extern void ppl_stripe_write_finished(struct stripe_head *sh); | 44 | extern void ppl_stripe_write_finished(struct stripe_head *sh); |
45 | extern int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add); | 45 | extern int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add); |
46 | extern void ppl_quiesce(struct r5conf *conf, int quiesce); | 46 | extern void ppl_quiesce(struct r5conf *conf, int quiesce); |
47 | extern int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio); | ||
47 | 48 | ||
48 | static inline bool raid5_has_ppl(struct r5conf *conf) | 49 | static inline bool raid5_has_ppl(struct r5conf *conf) |
49 | { | 50 | { |
@@ -104,7 +105,7 @@ static inline int log_handle_flush_request(struct r5conf *conf, struct bio *bio) | |||
104 | if (conf->log) | 105 | if (conf->log) |
105 | ret = r5l_handle_flush_request(conf->log, bio); | 106 | ret = r5l_handle_flush_request(conf->log, bio); |
106 | else if (raid5_has_ppl(conf)) | 107 | else if (raid5_has_ppl(conf)) |
107 | ret = 0; | 108 | ret = ppl_handle_flush_request(conf->log, bio); |
108 | 109 | ||
109 | return ret; | 110 | return ret; |
110 | } | 111 | } |
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c index 2764c2290062..42890a08375b 100644 --- a/drivers/md/raid5-ppl.c +++ b/drivers/md/raid5-ppl.c | |||
@@ -693,6 +693,16 @@ void ppl_quiesce(struct r5conf *conf, int quiesce) | |||
693 | } | 693 | } |
694 | } | 694 | } |
695 | 695 | ||
696 | int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio) | ||
697 | { | ||
698 | if (bio->bi_iter.bi_size == 0) { | ||
699 | bio_endio(bio); | ||
700 | return 0; | ||
701 | } | ||
702 | bio->bi_opf &= ~REQ_PREFLUSH; | ||
703 | return -EAGAIN; | ||
704 | } | ||
705 | |||
696 | void ppl_stripe_write_finished(struct stripe_head *sh) | 706 | void ppl_stripe_write_finished(struct stripe_head *sh) |
697 | { | 707 | { |
698 | struct ppl_io_unit *io; | 708 | struct ppl_io_unit *io; |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 50d01144b805..b5d2601483e3 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -2196,15 +2196,16 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp) | |||
2196 | static int grow_stripes(struct r5conf *conf, int num) | 2196 | static int grow_stripes(struct r5conf *conf, int num) |
2197 | { | 2197 | { |
2198 | struct kmem_cache *sc; | 2198 | struct kmem_cache *sc; |
2199 | size_t namelen = sizeof(conf->cache_name[0]); | ||
2199 | int devs = max(conf->raid_disks, conf->previous_raid_disks); | 2200 | int devs = max(conf->raid_disks, conf->previous_raid_disks); |
2200 | 2201 | ||
2201 | if (conf->mddev->gendisk) | 2202 | if (conf->mddev->gendisk) |
2202 | sprintf(conf->cache_name[0], | 2203 | snprintf(conf->cache_name[0], namelen, |
2203 | "raid%d-%s", conf->level, mdname(conf->mddev)); | 2204 | "raid%d-%s", conf->level, mdname(conf->mddev)); |
2204 | else | 2205 | else |
2205 | sprintf(conf->cache_name[0], | 2206 | snprintf(conf->cache_name[0], namelen, |
2206 | "raid%d-%p", conf->level, conf->mddev); | 2207 | "raid%d-%p", conf->level, conf->mddev); |
2207 | sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]); | 2208 | snprintf(conf->cache_name[1], namelen, "%.27s-alt", conf->cache_name[0]); |
2208 | 2209 | ||
2209 | conf->active_name = 0; | 2210 | conf->active_name = 0; |
2210 | sc = kmem_cache_create(conf->cache_name[conf->active_name], | 2211 | sc = kmem_cache_create(conf->cache_name[conf->active_name], |
@@ -6764,9 +6765,7 @@ static void free_conf(struct r5conf *conf) | |||
6764 | 6765 | ||
6765 | log_exit(conf); | 6766 | log_exit(conf); |
6766 | 6767 | ||
6767 | if (conf->shrinker.nr_deferred) | 6768 | unregister_shrinker(&conf->shrinker); |
6768 | unregister_shrinker(&conf->shrinker); | ||
6769 | |||
6770 | free_thread_groups(conf); | 6769 | free_thread_groups(conf); |
6771 | shrink_stripes(conf); | 6770 | shrink_stripes(conf); |
6772 | raid5_free_percpu(conf); | 6771 | raid5_free_percpu(conf); |
@@ -8001,13 +8000,7 @@ static void raid5_finish_reshape(struct mddev *mddev) | |||
8001 | 8000 | ||
8002 | if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { | 8001 | if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { |
8003 | 8002 | ||
8004 | if (mddev->delta_disks > 0) { | 8003 | if (mddev->delta_disks <= 0) { |
8005 | md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); | ||
8006 | if (mddev->queue) { | ||
8007 | set_capacity(mddev->gendisk, mddev->array_sectors); | ||
8008 | revalidate_disk(mddev->gendisk); | ||
8009 | } | ||
8010 | } else { | ||
8011 | int d; | 8004 | int d; |
8012 | spin_lock_irq(&conf->device_lock); | 8005 | spin_lock_irq(&conf->device_lock); |
8013 | mddev->degraded = raid5_calc_degraded(conf); | 8006 | mddev->degraded = raid5_calc_degraded(conf); |
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 2e6123825095..3f8da26032ac 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h | |||
@@ -450,6 +450,18 @@ enum { | |||
450 | * HANDLE gets cleared if stripe_handle leaves nothing locked. | 450 | * HANDLE gets cleared if stripe_handle leaves nothing locked. |
451 | */ | 451 | */ |
452 | 452 | ||
453 | /* Note: disk_info.rdev can be set to NULL asynchronously by raid5_remove_disk. | ||
454 | * There are three safe ways to access disk_info.rdev. | ||
455 | * 1/ when holding mddev->reconfig_mutex | ||
456 | * 2/ when resync/recovery/reshape is known to be happening - i.e. in code that | ||
457 | * is called as part of performing resync/recovery/reshape. | ||
458 | * 3/ while holding rcu_read_lock(), use rcu_dereference to get the pointer | ||
459 | * and if it is non-NULL, increment rdev->nr_pending before dropping the RCU | ||
460 | * lock. | ||
461 | * When .rdev is set to NULL, the nr_pending count checked again and if | ||
462 | * it has been incremented, the pointer is put back in .rdev. | ||
463 | */ | ||
464 | |||
453 | struct disk_info { | 465 | struct disk_info { |
454 | struct md_rdev *rdev, *replacement; | 466 | struct md_rdev *rdev, *replacement; |
455 | struct page *extra_page; /* extra page to use in prexor */ | 467 | struct page *extra_page; /* extra page to use in prexor */ |
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig index 145e12bfb819..372c074bb1b9 100644 --- a/drivers/media/Kconfig +++ b/drivers/media/Kconfig | |||
@@ -147,6 +147,8 @@ config DVB_CORE | |||
147 | config DVB_MMAP | 147 | config DVB_MMAP |
148 | bool "Enable DVB memory-mapped API (EXPERIMENTAL)" | 148 | bool "Enable DVB memory-mapped API (EXPERIMENTAL)" |
149 | depends on DVB_CORE | 149 | depends on DVB_CORE |
150 | depends on VIDEO_V4L2=y || VIDEO_V4L2=DVB_CORE | ||
151 | select VIDEOBUF2_VMALLOC | ||
150 | default n | 152 | default n |
151 | help | 153 | help |
152 | This option enables DVB experimental memory-mapped API, with | 154 | This option enables DVB experimental memory-mapped API, with |
diff --git a/drivers/media/common/videobuf2/Kconfig b/drivers/media/common/videobuf2/Kconfig index 5df05250de94..17c32ea58395 100644 --- a/drivers/media/common/videobuf2/Kconfig +++ b/drivers/media/common/videobuf2/Kconfig | |||
@@ -3,6 +3,9 @@ config VIDEOBUF2_CORE | |||
3 | select DMA_SHARED_BUFFER | 3 | select DMA_SHARED_BUFFER |
4 | tristate | 4 | tristate |
5 | 5 | ||
6 | config VIDEOBUF2_V4L2 | ||
7 | tristate | ||
8 | |||
6 | config VIDEOBUF2_MEMOPS | 9 | config VIDEOBUF2_MEMOPS |
7 | tristate | 10 | tristate |
8 | select FRAME_VECTOR | 11 | select FRAME_VECTOR |
diff --git a/drivers/media/common/videobuf2/Makefile b/drivers/media/common/videobuf2/Makefile index 19de5ccda20b..77bebe8b202f 100644 --- a/drivers/media/common/videobuf2/Makefile +++ b/drivers/media/common/videobuf2/Makefile | |||
@@ -1,5 +1,12 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
2 | videobuf2-common-objs := videobuf2-core.o | ||
1 | 3 | ||
2 | obj-$(CONFIG_VIDEOBUF2_CORE) += videobuf2-core.o videobuf2-v4l2.o | 4 | ifeq ($(CONFIG_TRACEPOINTS),y) |
5 | videobuf2-common-objs += vb2-trace.o | ||
6 | endif | ||
7 | |||
8 | obj-$(CONFIG_VIDEOBUF2_CORE) += videobuf2-common.o | ||
9 | obj-$(CONFIG_VIDEOBUF2_V4L2) += videobuf2-v4l2.o | ||
3 | obj-$(CONFIG_VIDEOBUF2_MEMOPS) += videobuf2-memops.o | 10 | obj-$(CONFIG_VIDEOBUF2_MEMOPS) += videobuf2-memops.o |
4 | obj-$(CONFIG_VIDEOBUF2_VMALLOC) += videobuf2-vmalloc.o | 11 | obj-$(CONFIG_VIDEOBUF2_VMALLOC) += videobuf2-vmalloc.o |
5 | obj-$(CONFIG_VIDEOBUF2_DMA_CONTIG) += videobuf2-dma-contig.o | 12 | obj-$(CONFIG_VIDEOBUF2_DMA_CONTIG) += videobuf2-dma-contig.o |
diff --git a/drivers/media/v4l2-core/vb2-trace.c b/drivers/media/common/videobuf2/vb2-trace.c index 4c0f39d271f0..4c0f39d271f0 100644 --- a/drivers/media/v4l2-core/vb2-trace.c +++ b/drivers/media/common/videobuf2/vb2-trace.c | |||
diff --git a/drivers/media/dvb-core/Makefile b/drivers/media/dvb-core/Makefile index 3a105d82019a..62b028ded9f7 100644 --- a/drivers/media/dvb-core/Makefile +++ b/drivers/media/dvb-core/Makefile | |||
@@ -4,7 +4,7 @@ | |||
4 | # | 4 | # |
5 | 5 | ||
6 | dvb-net-$(CONFIG_DVB_NET) := dvb_net.o | 6 | dvb-net-$(CONFIG_DVB_NET) := dvb_net.o |
7 | dvb-vb2-$(CONFIG_DVB_MMSP) := dvb_vb2.o | 7 | dvb-vb2-$(CONFIG_DVB_MMAP) := dvb_vb2.o |
8 | 8 | ||
9 | dvb-core-objs := dvbdev.o dmxdev.o dvb_demux.o \ | 9 | dvb-core-objs := dvbdev.o dmxdev.o dvb_demux.o \ |
10 | dvb_ca_en50221.o dvb_frontend.o \ | 10 | dvb_ca_en50221.o dvb_frontend.o \ |
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c index 6d53af00190e..61a750fae465 100644 --- a/drivers/media/dvb-core/dmxdev.c +++ b/drivers/media/dvb-core/dmxdev.c | |||
@@ -128,11 +128,7 @@ static int dvb_dvr_open(struct inode *inode, struct file *file) | |||
128 | struct dvb_device *dvbdev = file->private_data; | 128 | struct dvb_device *dvbdev = file->private_data; |
129 | struct dmxdev *dmxdev = dvbdev->priv; | 129 | struct dmxdev *dmxdev = dvbdev->priv; |
130 | struct dmx_frontend *front; | 130 | struct dmx_frontend *front; |
131 | #ifndef DVB_MMAP | ||
132 | bool need_ringbuffer = false; | 131 | bool need_ringbuffer = false; |
133 | #else | ||
134 | const bool need_ringbuffer = true; | ||
135 | #endif | ||
136 | 132 | ||
137 | dprintk("%s\n", __func__); | 133 | dprintk("%s\n", __func__); |
138 | 134 | ||
@@ -144,17 +140,31 @@ static int dvb_dvr_open(struct inode *inode, struct file *file) | |||
144 | return -ENODEV; | 140 | return -ENODEV; |
145 | } | 141 | } |
146 | 142 | ||
147 | #ifndef DVB_MMAP | 143 | dmxdev->may_do_mmap = 0; |
144 | |||
145 | /* | ||
146 | * The logic here is a little tricky due to the ifdef. | ||
147 | * | ||
148 | * The ringbuffer is used for both read and mmap. | ||
149 | * | ||
150 | * It is not needed, however, on two situations: | ||
151 | * - Write devices (access with O_WRONLY); | ||
152 | * - For duplex device nodes, opened with O_RDWR. | ||
153 | */ | ||
154 | |||
148 | if ((file->f_flags & O_ACCMODE) == O_RDONLY) | 155 | if ((file->f_flags & O_ACCMODE) == O_RDONLY) |
149 | need_ringbuffer = true; | 156 | need_ringbuffer = true; |
150 | #else | 157 | else if ((file->f_flags & O_ACCMODE) == O_RDWR) { |
151 | if ((file->f_flags & O_ACCMODE) == O_RDWR) { | ||
152 | if (!(dmxdev->capabilities & DMXDEV_CAP_DUPLEX)) { | 158 | if (!(dmxdev->capabilities & DMXDEV_CAP_DUPLEX)) { |
159 | #ifdef CONFIG_DVB_MMAP | ||
160 | dmxdev->may_do_mmap = 1; | ||
161 | need_ringbuffer = true; | ||
162 | #else | ||
153 | mutex_unlock(&dmxdev->mutex); | 163 | mutex_unlock(&dmxdev->mutex); |
154 | return -EOPNOTSUPP; | 164 | return -EOPNOTSUPP; |
165 | #endif | ||
155 | } | 166 | } |
156 | } | 167 | } |
157 | #endif | ||
158 | 168 | ||
159 | if (need_ringbuffer) { | 169 | if (need_ringbuffer) { |
160 | void *mem; | 170 | void *mem; |
@@ -169,8 +179,9 @@ static int dvb_dvr_open(struct inode *inode, struct file *file) | |||
169 | return -ENOMEM; | 179 | return -ENOMEM; |
170 | } | 180 | } |
171 | dvb_ringbuffer_init(&dmxdev->dvr_buffer, mem, DVR_BUFFER_SIZE); | 181 | dvb_ringbuffer_init(&dmxdev->dvr_buffer, mem, DVR_BUFFER_SIZE); |
172 | dvb_vb2_init(&dmxdev->dvr_vb2_ctx, "dvr", | 182 | if (dmxdev->may_do_mmap) |
173 | file->f_flags & O_NONBLOCK); | 183 | dvb_vb2_init(&dmxdev->dvr_vb2_ctx, "dvr", |
184 | file->f_flags & O_NONBLOCK); | ||
174 | dvbdev->readers--; | 185 | dvbdev->readers--; |
175 | } | 186 | } |
176 | 187 | ||
@@ -200,11 +211,6 @@ static int dvb_dvr_release(struct inode *inode, struct file *file) | |||
200 | { | 211 | { |
201 | struct dvb_device *dvbdev = file->private_data; | 212 | struct dvb_device *dvbdev = file->private_data; |
202 | struct dmxdev *dmxdev = dvbdev->priv; | 213 | struct dmxdev *dmxdev = dvbdev->priv; |
203 | #ifndef DVB_MMAP | ||
204 | bool need_ringbuffer = false; | ||
205 | #else | ||
206 | const bool need_ringbuffer = true; | ||
207 | #endif | ||
208 | 214 | ||
209 | mutex_lock(&dmxdev->mutex); | 215 | mutex_lock(&dmxdev->mutex); |
210 | 216 | ||
@@ -213,15 +219,14 @@ static int dvb_dvr_release(struct inode *inode, struct file *file) | |||
213 | dmxdev->demux->connect_frontend(dmxdev->demux, | 219 | dmxdev->demux->connect_frontend(dmxdev->demux, |
214 | dmxdev->dvr_orig_fe); | 220 | dmxdev->dvr_orig_fe); |
215 | } | 221 | } |
216 | #ifndef DVB_MMAP | ||
217 | if ((file->f_flags & O_ACCMODE) == O_RDONLY) | ||
218 | need_ringbuffer = true; | ||
219 | #endif | ||
220 | 222 | ||
221 | if (need_ringbuffer) { | 223 | if (((file->f_flags & O_ACCMODE) == O_RDONLY) || |
222 | if (dvb_vb2_is_streaming(&dmxdev->dvr_vb2_ctx)) | 224 | dmxdev->may_do_mmap) { |
223 | dvb_vb2_stream_off(&dmxdev->dvr_vb2_ctx); | 225 | if (dmxdev->may_do_mmap) { |
224 | dvb_vb2_release(&dmxdev->dvr_vb2_ctx); | 226 | if (dvb_vb2_is_streaming(&dmxdev->dvr_vb2_ctx)) |
227 | dvb_vb2_stream_off(&dmxdev->dvr_vb2_ctx); | ||
228 | dvb_vb2_release(&dmxdev->dvr_vb2_ctx); | ||
229 | } | ||
225 | dvbdev->readers++; | 230 | dvbdev->readers++; |
226 | if (dmxdev->dvr_buffer.data) { | 231 | if (dmxdev->dvr_buffer.data) { |
227 | void *mem = dmxdev->dvr_buffer.data; | 232 | void *mem = dmxdev->dvr_buffer.data; |
@@ -380,7 +385,8 @@ static void dvb_dmxdev_filter_timer(struct dmxdev_filter *dmxdevfilter) | |||
380 | 385 | ||
381 | static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len, | 386 | static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len, |
382 | const u8 *buffer2, size_t buffer2_len, | 387 | const u8 *buffer2, size_t buffer2_len, |
383 | struct dmx_section_filter *filter) | 388 | struct dmx_section_filter *filter, |
389 | u32 *buffer_flags) | ||
384 | { | 390 | { |
385 | struct dmxdev_filter *dmxdevfilter = filter->priv; | 391 | struct dmxdev_filter *dmxdevfilter = filter->priv; |
386 | int ret; | 392 | int ret; |
@@ -399,10 +405,12 @@ static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len, | |||
399 | dprintk("section callback %*ph\n", 6, buffer1); | 405 | dprintk("section callback %*ph\n", 6, buffer1); |
400 | if (dvb_vb2_is_streaming(&dmxdevfilter->vb2_ctx)) { | 406 | if (dvb_vb2_is_streaming(&dmxdevfilter->vb2_ctx)) { |
401 | ret = dvb_vb2_fill_buffer(&dmxdevfilter->vb2_ctx, | 407 | ret = dvb_vb2_fill_buffer(&dmxdevfilter->vb2_ctx, |
402 | buffer1, buffer1_len); | 408 | buffer1, buffer1_len, |
409 | buffer_flags); | ||
403 | if (ret == buffer1_len) | 410 | if (ret == buffer1_len) |
404 | ret = dvb_vb2_fill_buffer(&dmxdevfilter->vb2_ctx, | 411 | ret = dvb_vb2_fill_buffer(&dmxdevfilter->vb2_ctx, |
405 | buffer2, buffer2_len); | 412 | buffer2, buffer2_len, |
413 | buffer_flags); | ||
406 | } else { | 414 | } else { |
407 | ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, | 415 | ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, |
408 | buffer1, buffer1_len); | 416 | buffer1, buffer1_len); |
@@ -422,11 +430,12 @@ static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len, | |||
422 | 430 | ||
423 | static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len, | 431 | static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len, |
424 | const u8 *buffer2, size_t buffer2_len, | 432 | const u8 *buffer2, size_t buffer2_len, |
425 | struct dmx_ts_feed *feed) | 433 | struct dmx_ts_feed *feed, |
434 | u32 *buffer_flags) | ||
426 | { | 435 | { |
427 | struct dmxdev_filter *dmxdevfilter = feed->priv; | 436 | struct dmxdev_filter *dmxdevfilter = feed->priv; |
428 | struct dvb_ringbuffer *buffer; | 437 | struct dvb_ringbuffer *buffer; |
429 | #ifdef DVB_MMAP | 438 | #ifdef CONFIG_DVB_MMAP |
430 | struct dvb_vb2_ctx *ctx; | 439 | struct dvb_vb2_ctx *ctx; |
431 | #endif | 440 | #endif |
432 | int ret; | 441 | int ret; |
@@ -440,20 +449,22 @@ static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len, | |||
440 | if (dmxdevfilter->params.pes.output == DMX_OUT_TAP || | 449 | if (dmxdevfilter->params.pes.output == DMX_OUT_TAP || |
441 | dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP) { | 450 | dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP) { |
442 | buffer = &dmxdevfilter->buffer; | 451 | buffer = &dmxdevfilter->buffer; |
443 | #ifdef DVB_MMAP | 452 | #ifdef CONFIG_DVB_MMAP |
444 | ctx = &dmxdevfilter->vb2_ctx; | 453 | ctx = &dmxdevfilter->vb2_ctx; |
445 | #endif | 454 | #endif |
446 | } else { | 455 | } else { |
447 | buffer = &dmxdevfilter->dev->dvr_buffer; | 456 | buffer = &dmxdevfilter->dev->dvr_buffer; |
448 | #ifdef DVB_MMAP | 457 | #ifdef CONFIG_DVB_MMAP |
449 | ctx = &dmxdevfilter->dev->dvr_vb2_ctx; | 458 | ctx = &dmxdevfilter->dev->dvr_vb2_ctx; |
450 | #endif | 459 | #endif |
451 | } | 460 | } |
452 | 461 | ||
453 | if (dvb_vb2_is_streaming(ctx)) { | 462 | if (dvb_vb2_is_streaming(ctx)) { |
454 | ret = dvb_vb2_fill_buffer(ctx, buffer1, buffer1_len); | 463 | ret = dvb_vb2_fill_buffer(ctx, buffer1, buffer1_len, |
464 | buffer_flags); | ||
455 | if (ret == buffer1_len) | 465 | if (ret == buffer1_len) |
456 | ret = dvb_vb2_fill_buffer(ctx, buffer2, buffer2_len); | 466 | ret = dvb_vb2_fill_buffer(ctx, buffer2, buffer2_len, |
467 | buffer_flags); | ||
457 | } else { | 468 | } else { |
458 | if (buffer->error) { | 469 | if (buffer->error) { |
459 | spin_unlock(&dmxdevfilter->dev->lock); | 470 | spin_unlock(&dmxdevfilter->dev->lock); |
@@ -802,6 +813,12 @@ static int dvb_demux_open(struct inode *inode, struct file *file) | |||
802 | mutex_init(&dmxdevfilter->mutex); | 813 | mutex_init(&dmxdevfilter->mutex); |
803 | file->private_data = dmxdevfilter; | 814 | file->private_data = dmxdevfilter; |
804 | 815 | ||
816 | #ifdef CONFIG_DVB_MMAP | ||
817 | dmxdev->may_do_mmap = 1; | ||
818 | #else | ||
819 | dmxdev->may_do_mmap = 0; | ||
820 | #endif | ||
821 | |||
805 | dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192); | 822 | dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192); |
806 | dvb_vb2_init(&dmxdevfilter->vb2_ctx, "demux_filter", | 823 | dvb_vb2_init(&dmxdevfilter->vb2_ctx, "demux_filter", |
807 | file->f_flags & O_NONBLOCK); | 824 | file->f_flags & O_NONBLOCK); |
@@ -1111,7 +1128,7 @@ static int dvb_demux_do_ioctl(struct file *file, | |||
1111 | mutex_unlock(&dmxdevfilter->mutex); | 1128 | mutex_unlock(&dmxdevfilter->mutex); |
1112 | break; | 1129 | break; |
1113 | 1130 | ||
1114 | #ifdef DVB_MMAP | 1131 | #ifdef CONFIG_DVB_MMAP |
1115 | case DMX_REQBUFS: | 1132 | case DMX_REQBUFS: |
1116 | if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { | 1133 | if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { |
1117 | mutex_unlock(&dmxdev->mutex); | 1134 | mutex_unlock(&dmxdev->mutex); |
@@ -1160,7 +1177,7 @@ static int dvb_demux_do_ioctl(struct file *file, | |||
1160 | break; | 1177 | break; |
1161 | #endif | 1178 | #endif |
1162 | default: | 1179 | default: |
1163 | ret = -EINVAL; | 1180 | ret = -ENOTTY; |
1164 | break; | 1181 | break; |
1165 | } | 1182 | } |
1166 | mutex_unlock(&dmxdev->mutex); | 1183 | mutex_unlock(&dmxdev->mutex); |
@@ -1199,13 +1216,16 @@ static __poll_t dvb_demux_poll(struct file *file, poll_table *wait) | |||
1199 | return mask; | 1216 | return mask; |
1200 | } | 1217 | } |
1201 | 1218 | ||
1202 | #ifdef DVB_MMAP | 1219 | #ifdef CONFIG_DVB_MMAP |
1203 | static int dvb_demux_mmap(struct file *file, struct vm_area_struct *vma) | 1220 | static int dvb_demux_mmap(struct file *file, struct vm_area_struct *vma) |
1204 | { | 1221 | { |
1205 | struct dmxdev_filter *dmxdevfilter = file->private_data; | 1222 | struct dmxdev_filter *dmxdevfilter = file->private_data; |
1206 | struct dmxdev *dmxdev = dmxdevfilter->dev; | 1223 | struct dmxdev *dmxdev = dmxdevfilter->dev; |
1207 | int ret; | 1224 | int ret; |
1208 | 1225 | ||
1226 | if (!dmxdev->may_do_mmap) | ||
1227 | return -ENOTTY; | ||
1228 | |||
1209 | if (mutex_lock_interruptible(&dmxdev->mutex)) | 1229 | if (mutex_lock_interruptible(&dmxdev->mutex)) |
1210 | return -ERESTARTSYS; | 1230 | return -ERESTARTSYS; |
1211 | 1231 | ||
@@ -1249,7 +1269,7 @@ static const struct file_operations dvb_demux_fops = { | |||
1249 | .release = dvb_demux_release, | 1269 | .release = dvb_demux_release, |
1250 | .poll = dvb_demux_poll, | 1270 | .poll = dvb_demux_poll, |
1251 | .llseek = default_llseek, | 1271 | .llseek = default_llseek, |
1252 | #ifdef DVB_MMAP | 1272 | #ifdef CONFIG_DVB_MMAP |
1253 | .mmap = dvb_demux_mmap, | 1273 | .mmap = dvb_demux_mmap, |
1254 | #endif | 1274 | #endif |
1255 | }; | 1275 | }; |
@@ -1280,7 +1300,7 @@ static int dvb_dvr_do_ioctl(struct file *file, | |||
1280 | ret = dvb_dvr_set_buffer_size(dmxdev, arg); | 1300 | ret = dvb_dvr_set_buffer_size(dmxdev, arg); |
1281 | break; | 1301 | break; |
1282 | 1302 | ||
1283 | #ifdef DVB_MMAP | 1303 | #ifdef CONFIG_DVB_MMAP |
1284 | case DMX_REQBUFS: | 1304 | case DMX_REQBUFS: |
1285 | ret = dvb_vb2_reqbufs(&dmxdev->dvr_vb2_ctx, parg); | 1305 | ret = dvb_vb2_reqbufs(&dmxdev->dvr_vb2_ctx, parg); |
1286 | break; | 1306 | break; |
@@ -1304,7 +1324,7 @@ static int dvb_dvr_do_ioctl(struct file *file, | |||
1304 | break; | 1324 | break; |
1305 | #endif | 1325 | #endif |
1306 | default: | 1326 | default: |
1307 | ret = -EINVAL; | 1327 | ret = -ENOTTY; |
1308 | break; | 1328 | break; |
1309 | } | 1329 | } |
1310 | mutex_unlock(&dmxdev->mutex); | 1330 | mutex_unlock(&dmxdev->mutex); |
@@ -1322,11 +1342,6 @@ static __poll_t dvb_dvr_poll(struct file *file, poll_table *wait) | |||
1322 | struct dvb_device *dvbdev = file->private_data; | 1342 | struct dvb_device *dvbdev = file->private_data; |
1323 | struct dmxdev *dmxdev = dvbdev->priv; | 1343 | struct dmxdev *dmxdev = dvbdev->priv; |
1324 | __poll_t mask = 0; | 1344 | __poll_t mask = 0; |
1325 | #ifndef DVB_MMAP | ||
1326 | bool need_ringbuffer = false; | ||
1327 | #else | ||
1328 | const bool need_ringbuffer = true; | ||
1329 | #endif | ||
1330 | 1345 | ||
1331 | dprintk("%s\n", __func__); | 1346 | dprintk("%s\n", __func__); |
1332 | 1347 | ||
@@ -1337,11 +1352,8 @@ static __poll_t dvb_dvr_poll(struct file *file, poll_table *wait) | |||
1337 | 1352 | ||
1338 | poll_wait(file, &dmxdev->dvr_buffer.queue, wait); | 1353 | poll_wait(file, &dmxdev->dvr_buffer.queue, wait); |
1339 | 1354 | ||
1340 | #ifndef DVB_MMAP | 1355 | if (((file->f_flags & O_ACCMODE) == O_RDONLY) || |
1341 | if ((file->f_flags & O_ACCMODE) == O_RDONLY) | 1356 | dmxdev->may_do_mmap) { |
1342 | need_ringbuffer = true; | ||
1343 | #endif | ||
1344 | if (need_ringbuffer) { | ||
1345 | if (dmxdev->dvr_buffer.error) | 1357 | if (dmxdev->dvr_buffer.error) |
1346 | mask |= (EPOLLIN | EPOLLRDNORM | EPOLLPRI | EPOLLERR); | 1358 | mask |= (EPOLLIN | EPOLLRDNORM | EPOLLPRI | EPOLLERR); |
1347 | 1359 | ||
@@ -1353,13 +1365,16 @@ static __poll_t dvb_dvr_poll(struct file *file, poll_table *wait) | |||
1353 | return mask; | 1365 | return mask; |
1354 | } | 1366 | } |
1355 | 1367 | ||
1356 | #ifdef DVB_MMAP | 1368 | #ifdef CONFIG_DVB_MMAP |
1357 | static int dvb_dvr_mmap(struct file *file, struct vm_area_struct *vma) | 1369 | static int dvb_dvr_mmap(struct file *file, struct vm_area_struct *vma) |
1358 | { | 1370 | { |
1359 | struct dvb_device *dvbdev = file->private_data; | 1371 | struct dvb_device *dvbdev = file->private_data; |
1360 | struct dmxdev *dmxdev = dvbdev->priv; | 1372 | struct dmxdev *dmxdev = dvbdev->priv; |
1361 | int ret; | 1373 | int ret; |
1362 | 1374 | ||
1375 | if (!dmxdev->may_do_mmap) | ||
1376 | return -ENOTTY; | ||
1377 | |||
1363 | if (dmxdev->exit) | 1378 | if (dmxdev->exit) |
1364 | return -ENODEV; | 1379 | return -ENODEV; |
1365 | 1380 | ||
@@ -1381,7 +1396,7 @@ static const struct file_operations dvb_dvr_fops = { | |||
1381 | .release = dvb_dvr_release, | 1396 | .release = dvb_dvr_release, |
1382 | .poll = dvb_dvr_poll, | 1397 | .poll = dvb_dvr_poll, |
1383 | .llseek = default_llseek, | 1398 | .llseek = default_llseek, |
1384 | #ifdef DVB_MMAP | 1399 | #ifdef CONFIG_DVB_MMAP |
1385 | .mmap = dvb_dvr_mmap, | 1400 | .mmap = dvb_dvr_mmap, |
1386 | #endif | 1401 | #endif |
1387 | }; | 1402 | }; |
diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c index 210eed0269b0..f45091246bdc 100644 --- a/drivers/media/dvb-core/dvb_demux.c +++ b/drivers/media/dvb-core/dvb_demux.c | |||
@@ -55,6 +55,17 @@ MODULE_PARM_DESC(dvb_demux_feed_err_pkts, | |||
55 | dprintk(x); \ | 55 | dprintk(x); \ |
56 | } while (0) | 56 | } while (0) |
57 | 57 | ||
58 | #ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG | ||
59 | # define dprintk_sect_loss(x...) dprintk(x) | ||
60 | #else | ||
61 | # define dprintk_sect_loss(x...) | ||
62 | #endif | ||
63 | |||
64 | #define set_buf_flags(__feed, __flag) \ | ||
65 | do { \ | ||
66 | (__feed)->buffer_flags |= (__flag); \ | ||
67 | } while (0) | ||
68 | |||
58 | /****************************************************************************** | 69 | /****************************************************************************** |
59 | * static inlined helper functions | 70 | * static inlined helper functions |
60 | ******************************************************************************/ | 71 | ******************************************************************************/ |
@@ -104,31 +115,30 @@ static inline int dvb_dmx_swfilter_payload(struct dvb_demux_feed *feed, | |||
104 | { | 115 | { |
105 | int count = payload(buf); | 116 | int count = payload(buf); |
106 | int p; | 117 | int p; |
107 | #ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG | ||
108 | int ccok; | 118 | int ccok; |
109 | u8 cc; | 119 | u8 cc; |
110 | #endif | ||
111 | 120 | ||
112 | if (count == 0) | 121 | if (count == 0) |
113 | return -1; | 122 | return -1; |
114 | 123 | ||
115 | p = 188 - count; | 124 | p = 188 - count; |
116 | 125 | ||
117 | #ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG | ||
118 | cc = buf[3] & 0x0f; | 126 | cc = buf[3] & 0x0f; |
119 | ccok = ((feed->cc + 1) & 0x0f) == cc; | 127 | ccok = ((feed->cc + 1) & 0x0f) == cc; |
120 | feed->cc = cc; | 128 | feed->cc = cc; |
121 | if (!ccok) | 129 | if (!ccok) { |
122 | dprintk("missed packet: %d instead of %d!\n", | 130 | set_buf_flags(feed, DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED); |
123 | cc, (feed->cc + 1) & 0x0f); | 131 | dprintk_sect_loss("missed packet: %d instead of %d!\n", |
124 | #endif | 132 | cc, (feed->cc + 1) & 0x0f); |
133 | } | ||
125 | 134 | ||
126 | if (buf[1] & 0x40) // PUSI ? | 135 | if (buf[1] & 0x40) // PUSI ? |
127 | feed->peslen = 0xfffa; | 136 | feed->peslen = 0xfffa; |
128 | 137 | ||
129 | feed->peslen += count; | 138 | feed->peslen += count; |
130 | 139 | ||
131 | return feed->cb.ts(&buf[p], count, NULL, 0, &feed->feed.ts); | 140 | return feed->cb.ts(&buf[p], count, NULL, 0, &feed->feed.ts, |
141 | &feed->buffer_flags); | ||
132 | } | 142 | } |
133 | 143 | ||
134 | static int dvb_dmx_swfilter_sectionfilter(struct dvb_demux_feed *feed, | 144 | static int dvb_dmx_swfilter_sectionfilter(struct dvb_demux_feed *feed, |
@@ -150,7 +160,7 @@ static int dvb_dmx_swfilter_sectionfilter(struct dvb_demux_feed *feed, | |||
150 | return 0; | 160 | return 0; |
151 | 161 | ||
152 | return feed->cb.sec(feed->feed.sec.secbuf, feed->feed.sec.seclen, | 162 | return feed->cb.sec(feed->feed.sec.secbuf, feed->feed.sec.seclen, |
153 | NULL, 0, &f->filter); | 163 | NULL, 0, &f->filter, &feed->buffer_flags); |
154 | } | 164 | } |
155 | 165 | ||
156 | static inline int dvb_dmx_swfilter_section_feed(struct dvb_demux_feed *feed) | 166 | static inline int dvb_dmx_swfilter_section_feed(struct dvb_demux_feed *feed) |
@@ -169,8 +179,10 @@ static inline int dvb_dmx_swfilter_section_feed(struct dvb_demux_feed *feed) | |||
169 | if (sec->check_crc) { | 179 | if (sec->check_crc) { |
170 | section_syntax_indicator = ((sec->secbuf[1] & 0x80) != 0); | 180 | section_syntax_indicator = ((sec->secbuf[1] & 0x80) != 0); |
171 | if (section_syntax_indicator && | 181 | if (section_syntax_indicator && |
172 | demux->check_crc32(feed, sec->secbuf, sec->seclen)) | 182 | demux->check_crc32(feed, sec->secbuf, sec->seclen)) { |
183 | set_buf_flags(feed, DMX_BUFFER_FLAG_HAD_CRC32_DISCARD); | ||
173 | return -1; | 184 | return -1; |
185 | } | ||
174 | } | 186 | } |
175 | 187 | ||
176 | do { | 188 | do { |
@@ -187,7 +199,6 @@ static void dvb_dmx_swfilter_section_new(struct dvb_demux_feed *feed) | |||
187 | { | 199 | { |
188 | struct dmx_section_feed *sec = &feed->feed.sec; | 200 | struct dmx_section_feed *sec = &feed->feed.sec; |
189 | 201 | ||
190 | #ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG | ||
191 | if (sec->secbufp < sec->tsfeedp) { | 202 | if (sec->secbufp < sec->tsfeedp) { |
192 | int n = sec->tsfeedp - sec->secbufp; | 203 | int n = sec->tsfeedp - sec->secbufp; |
193 | 204 | ||
@@ -197,12 +208,13 @@ static void dvb_dmx_swfilter_section_new(struct dvb_demux_feed *feed) | |||
197 | * but just first and last. | 208 | * but just first and last. |
198 | */ | 209 | */ |
199 | if (sec->secbuf[0] != 0xff || sec->secbuf[n - 1] != 0xff) { | 210 | if (sec->secbuf[0] != 0xff || sec->secbuf[n - 1] != 0xff) { |
200 | dprintk("section ts padding loss: %d/%d\n", | 211 | set_buf_flags(feed, |
201 | n, sec->tsfeedp); | 212 | DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED); |
202 | dprintk("pad data: %*ph\n", n, sec->secbuf); | 213 | dprintk_sect_loss("section ts padding loss: %d/%d\n", |
214 | n, sec->tsfeedp); | ||
215 | dprintk_sect_loss("pad data: %*ph\n", n, sec->secbuf); | ||
203 | } | 216 | } |
204 | } | 217 | } |
205 | #endif | ||
206 | 218 | ||
207 | sec->tsfeedp = sec->secbufp = sec->seclen = 0; | 219 | sec->tsfeedp = sec->secbufp = sec->seclen = 0; |
208 | sec->secbuf = sec->secbuf_base; | 220 | sec->secbuf = sec->secbuf_base; |
@@ -237,11 +249,10 @@ static int dvb_dmx_swfilter_section_copy_dump(struct dvb_demux_feed *feed, | |||
237 | return 0; | 249 | return 0; |
238 | 250 | ||
239 | if (sec->tsfeedp + len > DMX_MAX_SECFEED_SIZE) { | 251 | if (sec->tsfeedp + len > DMX_MAX_SECFEED_SIZE) { |
240 | #ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG | 252 | set_buf_flags(feed, DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED); |
241 | dprintk("section buffer full loss: %d/%d\n", | 253 | dprintk_sect_loss("section buffer full loss: %d/%d\n", |
242 | sec->tsfeedp + len - DMX_MAX_SECFEED_SIZE, | 254 | sec->tsfeedp + len - DMX_MAX_SECFEED_SIZE, |
243 | DMX_MAX_SECFEED_SIZE); | 255 | DMX_MAX_SECFEED_SIZE); |
244 | #endif | ||
245 | len = DMX_MAX_SECFEED_SIZE - sec->tsfeedp; | 256 | len = DMX_MAX_SECFEED_SIZE - sec->tsfeedp; |
246 | } | 257 | } |
247 | 258 | ||
@@ -269,12 +280,13 @@ static int dvb_dmx_swfilter_section_copy_dump(struct dvb_demux_feed *feed, | |||
269 | sec->seclen = seclen; | 280 | sec->seclen = seclen; |
270 | sec->crc_val = ~0; | 281 | sec->crc_val = ~0; |
271 | /* dump [secbuf .. secbuf+seclen) */ | 282 | /* dump [secbuf .. secbuf+seclen) */ |
272 | if (feed->pusi_seen) | 283 | if (feed->pusi_seen) { |
273 | dvb_dmx_swfilter_section_feed(feed); | 284 | dvb_dmx_swfilter_section_feed(feed); |
274 | #ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG | 285 | } else { |
275 | else | 286 | set_buf_flags(feed, |
276 | dprintk("pusi not seen, discarding section data\n"); | 287 | DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED); |
277 | #endif | 288 | dprintk_sect_loss("pusi not seen, discarding section data\n"); |
289 | } | ||
278 | sec->secbufp += seclen; /* secbufp and secbuf moving together is */ | 290 | sec->secbufp += seclen; /* secbufp and secbuf moving together is */ |
279 | sec->secbuf += seclen; /* redundant but saves pointer arithmetic */ | 291 | sec->secbuf += seclen; /* redundant but saves pointer arithmetic */ |
280 | } | 292 | } |
@@ -307,18 +319,22 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed, | |||
307 | } | 319 | } |
308 | 320 | ||
309 | if (!ccok || dc_i) { | 321 | if (!ccok || dc_i) { |
310 | #ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG | 322 | if (dc_i) { |
311 | if (dc_i) | 323 | set_buf_flags(feed, |
312 | dprintk("%d frame with disconnect indicator\n", | 324 | DMX_BUFFER_FLAG_DISCONTINUITY_INDICATOR); |
325 | dprintk_sect_loss("%d frame with disconnect indicator\n", | ||
313 | cc); | 326 | cc); |
314 | else | 327 | } else { |
315 | dprintk("discontinuity: %d instead of %d. %d bytes lost\n", | 328 | set_buf_flags(feed, |
329 | DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED); | ||
330 | dprintk_sect_loss("discontinuity: %d instead of %d. %d bytes lost\n", | ||
316 | cc, (feed->cc + 1) & 0x0f, count + 4); | 331 | cc, (feed->cc + 1) & 0x0f, count + 4); |
332 | } | ||
317 | /* | 333 | /* |
318 | * those bytes under sume circumstances will again be reported | 334 | * those bytes under some circumstances will again be reported |
319 | * in the following dvb_dmx_swfilter_section_new | 335 | * in the following dvb_dmx_swfilter_section_new |
320 | */ | 336 | */ |
321 | #endif | 337 | |
322 | /* | 338 | /* |
323 | * Discontinuity detected. Reset pusi_seen to | 339 | * Discontinuity detected. Reset pusi_seen to |
324 | * stop feeding of suspicious data until next PUSI=1 arrives | 340 | * stop feeding of suspicious data until next PUSI=1 arrives |
@@ -326,6 +342,7 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed, | |||
326 | * FIXME: does it make sense if the MPEG-TS is the one | 342 | * FIXME: does it make sense if the MPEG-TS is the one |
327 | * reporting discontinuity? | 343 | * reporting discontinuity? |
328 | */ | 344 | */ |
345 | |||
329 | feed->pusi_seen = false; | 346 | feed->pusi_seen = false; |
330 | dvb_dmx_swfilter_section_new(feed); | 347 | dvb_dmx_swfilter_section_new(feed); |
331 | } | 348 | } |
@@ -345,11 +362,11 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed, | |||
345 | dvb_dmx_swfilter_section_new(feed); | 362 | dvb_dmx_swfilter_section_new(feed); |
346 | dvb_dmx_swfilter_section_copy_dump(feed, after, | 363 | dvb_dmx_swfilter_section_copy_dump(feed, after, |
347 | after_len); | 364 | after_len); |
365 | } else if (count > 0) { | ||
366 | set_buf_flags(feed, | ||
367 | DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED); | ||
368 | dprintk_sect_loss("PUSI=1 but %d bytes lost\n", count); | ||
348 | } | 369 | } |
349 | #ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG | ||
350 | else if (count > 0) | ||
351 | dprintk("PUSI=1 but %d bytes lost\n", count); | ||
352 | #endif | ||
353 | } else { | 370 | } else { |
354 | /* PUSI=0 (is not set), no section boundary */ | 371 | /* PUSI=0 (is not set), no section boundary */ |
355 | dvb_dmx_swfilter_section_copy_dump(feed, &buf[p], count); | 372 | dvb_dmx_swfilter_section_copy_dump(feed, &buf[p], count); |
@@ -369,7 +386,8 @@ static inline void dvb_dmx_swfilter_packet_type(struct dvb_demux_feed *feed, | |||
369 | if (feed->ts_type & TS_PAYLOAD_ONLY) | 386 | if (feed->ts_type & TS_PAYLOAD_ONLY) |
370 | dvb_dmx_swfilter_payload(feed, buf); | 387 | dvb_dmx_swfilter_payload(feed, buf); |
371 | else | 388 | else |
372 | feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts); | 389 | feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts, |
390 | &feed->buffer_flags); | ||
373 | } | 391 | } |
374 | /* Used only on full-featured devices */ | 392 | /* Used only on full-featured devices */ |
375 | if (feed->ts_type & TS_DECODER) | 393 | if (feed->ts_type & TS_DECODER) |
@@ -430,6 +448,11 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf) | |||
430 | } | 448 | } |
431 | 449 | ||
432 | if (buf[1] & 0x80) { | 450 | if (buf[1] & 0x80) { |
451 | list_for_each_entry(feed, &demux->feed_list, list_head) { | ||
452 | if ((feed->pid != pid) && (feed->pid != 0x2000)) | ||
453 | continue; | ||
454 | set_buf_flags(feed, DMX_BUFFER_FLAG_TEI); | ||
455 | } | ||
433 | dprintk_tscheck("TEI detected. PID=0x%x data1=0x%x\n", | 456 | dprintk_tscheck("TEI detected. PID=0x%x data1=0x%x\n", |
434 | pid, buf[1]); | 457 | pid, buf[1]); |
435 | /* data in this packet can't be trusted - drop it unless | 458 | /* data in this packet can't be trusted - drop it unless |
@@ -445,6 +468,13 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf) | |||
445 | (demux->cnt_storage[pid] + 1) & 0xf; | 468 | (demux->cnt_storage[pid] + 1) & 0xf; |
446 | 469 | ||
447 | if ((buf[3] & 0xf) != demux->cnt_storage[pid]) { | 470 | if ((buf[3] & 0xf) != demux->cnt_storage[pid]) { |
471 | list_for_each_entry(feed, &demux->feed_list, list_head) { | ||
472 | if ((feed->pid != pid) && (feed->pid != 0x2000)) | ||
473 | continue; | ||
474 | set_buf_flags(feed, | ||
475 | DMX_BUFFER_PKT_COUNTER_MISMATCH); | ||
476 | } | ||
477 | |||
448 | dprintk_tscheck("TS packet counter mismatch. PID=0x%x expected 0x%x got 0x%x\n", | 478 | dprintk_tscheck("TS packet counter mismatch. PID=0x%x expected 0x%x got 0x%x\n", |
449 | pid, demux->cnt_storage[pid], | 479 | pid, demux->cnt_storage[pid], |
450 | buf[3] & 0xf); | 480 | buf[3] & 0xf); |
@@ -466,7 +496,8 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf) | |||
466 | if (feed->pid == pid) | 496 | if (feed->pid == pid) |
467 | dvb_dmx_swfilter_packet_type(feed, buf); | 497 | dvb_dmx_swfilter_packet_type(feed, buf); |
468 | else if (feed->pid == 0x2000) | 498 | else if (feed->pid == 0x2000) |
469 | feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts); | 499 | feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts, |
500 | &feed->buffer_flags); | ||
470 | } | 501 | } |
471 | } | 502 | } |
472 | 503 | ||
@@ -585,7 +616,8 @@ void dvb_dmx_swfilter_raw(struct dvb_demux *demux, const u8 *buf, size_t count) | |||
585 | 616 | ||
586 | spin_lock_irqsave(&demux->lock, flags); | 617 | spin_lock_irqsave(&demux->lock, flags); |
587 | 618 | ||
588 | demux->feed->cb.ts(buf, count, NULL, 0, &demux->feed->feed.ts); | 619 | demux->feed->cb.ts(buf, count, NULL, 0, &demux->feed->feed.ts, |
620 | &demux->feed->buffer_flags); | ||
589 | 621 | ||
590 | spin_unlock_irqrestore(&demux->lock, flags); | 622 | spin_unlock_irqrestore(&demux->lock, flags); |
591 | } | 623 | } |
@@ -785,6 +817,7 @@ static int dvbdmx_allocate_ts_feed(struct dmx_demux *dmx, | |||
785 | feed->demux = demux; | 817 | feed->demux = demux; |
786 | feed->pid = 0xffff; | 818 | feed->pid = 0xffff; |
787 | feed->peslen = 0xfffa; | 819 | feed->peslen = 0xfffa; |
820 | feed->buffer_flags = 0; | ||
788 | 821 | ||
789 | (*ts_feed) = &feed->feed.ts; | 822 | (*ts_feed) = &feed->feed.ts; |
790 | (*ts_feed)->parent = dmx; | 823 | (*ts_feed)->parent = dmx; |
@@ -1042,6 +1075,7 @@ static int dvbdmx_allocate_section_feed(struct dmx_demux *demux, | |||
1042 | dvbdmxfeed->cb.sec = callback; | 1075 | dvbdmxfeed->cb.sec = callback; |
1043 | dvbdmxfeed->demux = dvbdmx; | 1076 | dvbdmxfeed->demux = dvbdmx; |
1044 | dvbdmxfeed->pid = 0xffff; | 1077 | dvbdmxfeed->pid = 0xffff; |
1078 | dvbdmxfeed->buffer_flags = 0; | ||
1045 | dvbdmxfeed->feed.sec.secbuf = dvbdmxfeed->feed.sec.secbuf_base; | 1079 | dvbdmxfeed->feed.sec.secbuf = dvbdmxfeed->feed.sec.secbuf_base; |
1046 | dvbdmxfeed->feed.sec.secbufp = dvbdmxfeed->feed.sec.seclen = 0; | 1080 | dvbdmxfeed->feed.sec.secbufp = dvbdmxfeed->feed.sec.seclen = 0; |
1047 | dvbdmxfeed->feed.sec.tsfeedp = 0; | 1081 | dvbdmxfeed->feed.sec.tsfeedp = 0; |
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c index b6c7eec863b9..ba39f9942e1d 100644 --- a/drivers/media/dvb-core/dvb_net.c +++ b/drivers/media/dvb-core/dvb_net.c | |||
@@ -883,7 +883,8 @@ static void dvb_net_ule(struct net_device *dev, const u8 *buf, size_t buf_len) | |||
883 | 883 | ||
884 | static int dvb_net_ts_callback(const u8 *buffer1, size_t buffer1_len, | 884 | static int dvb_net_ts_callback(const u8 *buffer1, size_t buffer1_len, |
885 | const u8 *buffer2, size_t buffer2_len, | 885 | const u8 *buffer2, size_t buffer2_len, |
886 | struct dmx_ts_feed *feed) | 886 | struct dmx_ts_feed *feed, |
887 | u32 *buffer_flags) | ||
887 | { | 888 | { |
888 | struct net_device *dev = feed->priv; | 889 | struct net_device *dev = feed->priv; |
889 | 890 | ||
@@ -992,7 +993,7 @@ static void dvb_net_sec(struct net_device *dev, | |||
992 | 993 | ||
993 | static int dvb_net_sec_callback(const u8 *buffer1, size_t buffer1_len, | 994 | static int dvb_net_sec_callback(const u8 *buffer1, size_t buffer1_len, |
994 | const u8 *buffer2, size_t buffer2_len, | 995 | const u8 *buffer2, size_t buffer2_len, |
995 | struct dmx_section_filter *filter) | 996 | struct dmx_section_filter *filter, u32 *buffer_flags) |
996 | { | 997 | { |
997 | struct net_device *dev = filter->priv; | 998 | struct net_device *dev = filter->priv; |
998 | 999 | ||
diff --git a/drivers/media/dvb-core/dvb_vb2.c b/drivers/media/dvb-core/dvb_vb2.c index 763145d74e83..b811adf88afa 100644 --- a/drivers/media/dvb-core/dvb_vb2.c +++ b/drivers/media/dvb-core/dvb_vb2.c | |||
@@ -256,7 +256,8 @@ int dvb_vb2_is_streaming(struct dvb_vb2_ctx *ctx) | |||
256 | } | 256 | } |
257 | 257 | ||
258 | int dvb_vb2_fill_buffer(struct dvb_vb2_ctx *ctx, | 258 | int dvb_vb2_fill_buffer(struct dvb_vb2_ctx *ctx, |
259 | const unsigned char *src, int len) | 259 | const unsigned char *src, int len, |
260 | enum dmx_buffer_flags *buffer_flags) | ||
260 | { | 261 | { |
261 | unsigned long flags = 0; | 262 | unsigned long flags = 0; |
262 | void *vbuf = NULL; | 263 | void *vbuf = NULL; |
@@ -264,15 +265,17 @@ int dvb_vb2_fill_buffer(struct dvb_vb2_ctx *ctx, | |||
264 | unsigned char *psrc = (unsigned char *)src; | 265 | unsigned char *psrc = (unsigned char *)src; |
265 | int ll = 0; | 266 | int ll = 0; |
266 | 267 | ||
267 | dprintk(3, "[%s] %d bytes are rcvd\n", ctx->name, len); | 268 | /* |
268 | if (!src) { | 269 | * normal case: This func is called twice from demux driver |
269 | dprintk(3, "[%s]:NULL pointer src\n", ctx->name); | 270 | * one with valid src pointer, second time with NULL pointer |
270 | /**normal case: This func is called twice from demux driver | 271 | */ |
271 | * once with valid src pointer, second time with NULL pointer | 272 | if (!src || !len) |
272 | */ | ||
273 | return 0; | 273 | return 0; |
274 | } | ||
275 | spin_lock_irqsave(&ctx->slock, flags); | 274 | spin_lock_irqsave(&ctx->slock, flags); |
275 | if (buffer_flags && *buffer_flags) { | ||
276 | ctx->flags |= *buffer_flags; | ||
277 | *buffer_flags = 0; | ||
278 | } | ||
276 | while (todo) { | 279 | while (todo) { |
277 | if (!ctx->buf) { | 280 | if (!ctx->buf) { |
278 | if (list_empty(&ctx->dvb_q)) { | 281 | if (list_empty(&ctx->dvb_q)) { |
@@ -395,6 +398,7 @@ int dvb_vb2_qbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b) | |||
395 | 398 | ||
396 | int dvb_vb2_dqbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b) | 399 | int dvb_vb2_dqbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b) |
397 | { | 400 | { |
401 | unsigned long flags; | ||
398 | int ret; | 402 | int ret; |
399 | 403 | ||
400 | ret = vb2_core_dqbuf(&ctx->vb_q, &b->index, b, ctx->nonblocking); | 404 | ret = vb2_core_dqbuf(&ctx->vb_q, &b->index, b, ctx->nonblocking); |
@@ -402,7 +406,16 @@ int dvb_vb2_dqbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b) | |||
402 | dprintk(1, "[%s] errno=%d\n", ctx->name, ret); | 406 | dprintk(1, "[%s] errno=%d\n", ctx->name, ret); |
403 | return ret; | 407 | return ret; |
404 | } | 408 | } |
405 | dprintk(5, "[%s] index=%d\n", ctx->name, b->index); | 409 | |
410 | spin_lock_irqsave(&ctx->slock, flags); | ||
411 | b->count = ctx->count++; | ||
412 | b->flags = ctx->flags; | ||
413 | ctx->flags = 0; | ||
414 | spin_unlock_irqrestore(&ctx->slock, flags); | ||
415 | |||
416 | dprintk(5, "[%s] index=%d, count=%d, flags=%d\n", | ||
417 | ctx->name, b->index, ctx->count, b->flags); | ||
418 | |||
406 | 419 | ||
407 | return 0; | 420 | return 0; |
408 | } | 421 | } |
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c index 50bce68ffd66..65d157fe76d1 100644 --- a/drivers/media/dvb-frontends/m88ds3103.c +++ b/drivers/media/dvb-frontends/m88ds3103.c | |||
@@ -1262,11 +1262,12 @@ static int m88ds3103_select(struct i2c_mux_core *muxc, u32 chan) | |||
1262 | * New users must use I2C client binding directly! | 1262 | * New users must use I2C client binding directly! |
1263 | */ | 1263 | */ |
1264 | struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg, | 1264 | struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg, |
1265 | struct i2c_adapter *i2c, struct i2c_adapter **tuner_i2c_adapter) | 1265 | struct i2c_adapter *i2c, |
1266 | struct i2c_adapter **tuner_i2c_adapter) | ||
1266 | { | 1267 | { |
1267 | struct i2c_client *client; | 1268 | struct i2c_client *client; |
1268 | struct i2c_board_info board_info; | 1269 | struct i2c_board_info board_info; |
1269 | struct m88ds3103_platform_data pdata; | 1270 | struct m88ds3103_platform_data pdata = {}; |
1270 | 1271 | ||
1271 | pdata.clk = cfg->clock; | 1272 | pdata.clk = cfg->clock; |
1272 | pdata.i2c_wr_max = cfg->i2c_wr_max; | 1273 | pdata.i2c_wr_max = cfg->i2c_wr_max; |
@@ -1409,6 +1410,8 @@ static int m88ds3103_probe(struct i2c_client *client, | |||
1409 | case M88DS3103_CHIP_ID: | 1410 | case M88DS3103_CHIP_ID: |
1410 | break; | 1411 | break; |
1411 | default: | 1412 | default: |
1413 | ret = -ENODEV; | ||
1414 | dev_err(&client->dev, "Unknown device. Chip_id=%02x\n", dev->chip_id); | ||
1412 | goto err_kfree; | 1415 | goto err_kfree; |
1413 | } | 1416 | } |
1414 | 1417 | ||
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c index 3c1851984b90..2476d812f669 100644 --- a/drivers/media/i2c/tvp5150.c +++ b/drivers/media/i2c/tvp5150.c | |||
@@ -505,80 +505,77 @@ static struct i2c_vbi_ram_value vbi_ram_default[] = | |||
505 | /* FIXME: Current api doesn't handle all VBI types, those not | 505 | /* FIXME: Current api doesn't handle all VBI types, those not |
506 | yet supported are placed under #if 0 */ | 506 | yet supported are placed under #if 0 */ |
507 | #if 0 | 507 | #if 0 |
508 | {0x010, /* Teletext, SECAM, WST System A */ | 508 | [0] = {0x010, /* Teletext, SECAM, WST System A */ |
509 | {V4L2_SLICED_TELETEXT_SECAM,6,23,1}, | 509 | {V4L2_SLICED_TELETEXT_SECAM,6,23,1}, |
510 | { 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x26, | 510 | { 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x26, |
511 | 0xe6, 0xb4, 0x0e, 0x00, 0x00, 0x00, 0x10, 0x00 } | 511 | 0xe6, 0xb4, 0x0e, 0x00, 0x00, 0x00, 0x10, 0x00 } |
512 | }, | 512 | }, |
513 | #endif | 513 | #endif |
514 | {0x030, /* Teletext, PAL, WST System B */ | 514 | [1] = {0x030, /* Teletext, PAL, WST System B */ |
515 | {V4L2_SLICED_TELETEXT_B,6,22,1}, | 515 | {V4L2_SLICED_TELETEXT_B,6,22,1}, |
516 | { 0xaa, 0xaa, 0xff, 0xff, 0x27, 0x2e, 0x20, 0x2b, | 516 | { 0xaa, 0xaa, 0xff, 0xff, 0x27, 0x2e, 0x20, 0x2b, |
517 | 0xa6, 0x72, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00 } | 517 | 0xa6, 0x72, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00 } |
518 | }, | 518 | }, |
519 | #if 0 | 519 | #if 0 |
520 | {0x050, /* Teletext, PAL, WST System C */ | 520 | [2] = {0x050, /* Teletext, PAL, WST System C */ |
521 | {V4L2_SLICED_TELETEXT_PAL_C,6,22,1}, | 521 | {V4L2_SLICED_TELETEXT_PAL_C,6,22,1}, |
522 | { 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x22, | 522 | { 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x22, |
523 | 0xa6, 0x98, 0x0d, 0x00, 0x00, 0x00, 0x10, 0x00 } | 523 | 0xa6, 0x98, 0x0d, 0x00, 0x00, 0x00, 0x10, 0x00 } |
524 | }, | 524 | }, |
525 | {0x070, /* Teletext, NTSC, WST System B */ | 525 | [3] = {0x070, /* Teletext, NTSC, WST System B */ |
526 | {V4L2_SLICED_TELETEXT_NTSC_B,10,21,1}, | 526 | {V4L2_SLICED_TELETEXT_NTSC_B,10,21,1}, |
527 | { 0xaa, 0xaa, 0xff, 0xff, 0x27, 0x2e, 0x20, 0x23, | 527 | { 0xaa, 0xaa, 0xff, 0xff, 0x27, 0x2e, 0x20, 0x23, |
528 | 0x69, 0x93, 0x0d, 0x00, 0x00, 0x00, 0x10, 0x00 } | 528 | 0x69, 0x93, 0x0d, 0x00, 0x00, 0x00, 0x10, 0x00 } |
529 | }, | 529 | }, |
530 | {0x090, /* Tetetext, NTSC NABTS System C */ | 530 | [4] = {0x090, /* Tetetext, NTSC NABTS System C */ |
531 | {V4L2_SLICED_TELETEXT_NTSC_C,10,21,1}, | 531 | {V4L2_SLICED_TELETEXT_NTSC_C,10,21,1}, |
532 | { 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x22, | 532 | { 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x22, |
533 | 0x69, 0x93, 0x0d, 0x00, 0x00, 0x00, 0x15, 0x00 } | 533 | 0x69, 0x93, 0x0d, 0x00, 0x00, 0x00, 0x15, 0x00 } |
534 | }, | 534 | }, |
535 | {0x0b0, /* Teletext, NTSC-J, NABTS System D */ | 535 | [5] = {0x0b0, /* Teletext, NTSC-J, NABTS System D */ |
536 | {V4L2_SLICED_TELETEXT_NTSC_D,10,21,1}, | 536 | {V4L2_SLICED_TELETEXT_NTSC_D,10,21,1}, |
537 | { 0xaa, 0xaa, 0xff, 0xff, 0xa7, 0x2e, 0x20, 0x23, | 537 | { 0xaa, 0xaa, 0xff, 0xff, 0xa7, 0x2e, 0x20, 0x23, |
538 | 0x69, 0x93, 0x0d, 0x00, 0x00, 0x00, 0x10, 0x00 } | 538 | 0x69, 0x93, 0x0d, 0x00, 0x00, 0x00, 0x10, 0x00 } |
539 | }, | 539 | }, |
540 | {0x0d0, /* Closed Caption, PAL/SECAM */ | 540 | [6] = {0x0d0, /* Closed Caption, PAL/SECAM */ |
541 | {V4L2_SLICED_CAPTION_625,22,22,1}, | 541 | {V4L2_SLICED_CAPTION_625,22,22,1}, |
542 | { 0xaa, 0x2a, 0xff, 0x3f, 0x04, 0x51, 0x6e, 0x02, | 542 | { 0xaa, 0x2a, 0xff, 0x3f, 0x04, 0x51, 0x6e, 0x02, |
543 | 0xa6, 0x7b, 0x09, 0x00, 0x00, 0x00, 0x27, 0x00 } | 543 | 0xa6, 0x7b, 0x09, 0x00, 0x00, 0x00, 0x27, 0x00 } |
544 | }, | 544 | }, |
545 | #endif | 545 | #endif |
546 | {0x0f0, /* Closed Caption, NTSC */ | 546 | [7] = {0x0f0, /* Closed Caption, NTSC */ |
547 | {V4L2_SLICED_CAPTION_525,21,21,1}, | 547 | {V4L2_SLICED_CAPTION_525,21,21,1}, |
548 | { 0xaa, 0x2a, 0xff, 0x3f, 0x04, 0x51, 0x6e, 0x02, | 548 | { 0xaa, 0x2a, 0xff, 0x3f, 0x04, 0x51, 0x6e, 0x02, |
549 | 0x69, 0x8c, 0x09, 0x00, 0x00, 0x00, 0x27, 0x00 } | 549 | 0x69, 0x8c, 0x09, 0x00, 0x00, 0x00, 0x27, 0x00 } |
550 | }, | 550 | }, |
551 | {0x110, /* Wide Screen Signal, PAL/SECAM */ | 551 | [8] = {0x110, /* Wide Screen Signal, PAL/SECAM */ |
552 | {V4L2_SLICED_WSS_625,23,23,1}, | 552 | {V4L2_SLICED_WSS_625,23,23,1}, |
553 | { 0x5b, 0x55, 0xc5, 0xff, 0x00, 0x71, 0x6e, 0x42, | 553 | { 0x5b, 0x55, 0xc5, 0xff, 0x00, 0x71, 0x6e, 0x42, |
554 | 0xa6, 0xcd, 0x0f, 0x00, 0x00, 0x00, 0x3a, 0x00 } | 554 | 0xa6, 0xcd, 0x0f, 0x00, 0x00, 0x00, 0x3a, 0x00 } |
555 | }, | 555 | }, |
556 | #if 0 | 556 | #if 0 |
557 | {0x130, /* Wide Screen Signal, NTSC C */ | 557 | [9] = {0x130, /* Wide Screen Signal, NTSC C */ |
558 | {V4L2_SLICED_WSS_525,20,20,1}, | 558 | {V4L2_SLICED_WSS_525,20,20,1}, |
559 | { 0x38, 0x00, 0x3f, 0x00, 0x00, 0x71, 0x6e, 0x43, | 559 | { 0x38, 0x00, 0x3f, 0x00, 0x00, 0x71, 0x6e, 0x43, |
560 | 0x69, 0x7c, 0x08, 0x00, 0x00, 0x00, 0x39, 0x00 } | 560 | 0x69, 0x7c, 0x08, 0x00, 0x00, 0x00, 0x39, 0x00 } |
561 | }, | 561 | }, |
562 | {0x150, /* Vertical Interval Timecode (VITC), PAL/SECAM */ | 562 | [10] = {0x150, /* Vertical Interval Timecode (VITC), PAL/SECAM */ |
563 | {V4l2_SLICED_VITC_625,6,22,0}, | 563 | {V4l2_SLICED_VITC_625,6,22,0}, |
564 | { 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x6d, 0x49, | 564 | { 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x6d, 0x49, |
565 | 0xa6, 0x85, 0x08, 0x00, 0x00, 0x00, 0x4c, 0x00 } | 565 | 0xa6, 0x85, 0x08, 0x00, 0x00, 0x00, 0x4c, 0x00 } |
566 | }, | 566 | }, |
567 | {0x170, /* Vertical Interval Timecode (VITC), NTSC */ | 567 | [11] = {0x170, /* Vertical Interval Timecode (VITC), NTSC */ |
568 | {V4l2_SLICED_VITC_525,10,20,0}, | 568 | {V4l2_SLICED_VITC_525,10,20,0}, |
569 | { 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x6d, 0x49, | 569 | { 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x6d, 0x49, |
570 | 0x69, 0x94, 0x08, 0x00, 0x00, 0x00, 0x4c, 0x00 } | 570 | 0x69, 0x94, 0x08, 0x00, 0x00, 0x00, 0x4c, 0x00 } |
571 | }, | 571 | }, |
572 | #endif | 572 | #endif |
573 | {0x190, /* Video Program System (VPS), PAL */ | 573 | [12] = {0x190, /* Video Program System (VPS), PAL */ |
574 | {V4L2_SLICED_VPS,16,16,0}, | 574 | {V4L2_SLICED_VPS,16,16,0}, |
575 | { 0xaa, 0xaa, 0xff, 0xff, 0xba, 0xce, 0x2b, 0x0d, | 575 | { 0xaa, 0xaa, 0xff, 0xff, 0xba, 0xce, 0x2b, 0x0d, |
576 | 0xa6, 0xda, 0x0b, 0x00, 0x00, 0x00, 0x60, 0x00 } | 576 | 0xa6, 0xda, 0x0b, 0x00, 0x00, 0x00, 0x60, 0x00 } |
577 | }, | 577 | }, |
578 | /* 0x1d0 User programmable */ | 578 | /* 0x1d0 User programmable */ |
579 | |||
580 | /* End of struct */ | ||
581 | { (u16)-1 } | ||
582 | }; | 579 | }; |
583 | 580 | ||
584 | static int tvp5150_write_inittab(struct v4l2_subdev *sd, | 581 | static int tvp5150_write_inittab(struct v4l2_subdev *sd, |
@@ -591,10 +588,10 @@ static int tvp5150_write_inittab(struct v4l2_subdev *sd, | |||
591 | return 0; | 588 | return 0; |
592 | } | 589 | } |
593 | 590 | ||
594 | static int tvp5150_vdp_init(struct v4l2_subdev *sd, | 591 | static int tvp5150_vdp_init(struct v4l2_subdev *sd) |
595 | const struct i2c_vbi_ram_value *regs) | ||
596 | { | 592 | { |
597 | unsigned int i; | 593 | unsigned int i; |
594 | int j; | ||
598 | 595 | ||
599 | /* Disable Full Field */ | 596 | /* Disable Full Field */ |
600 | tvp5150_write(sd, TVP5150_FULL_FIELD_ENA, 0); | 597 | tvp5150_write(sd, TVP5150_FULL_FIELD_ENA, 0); |
@@ -604,14 +601,17 @@ static int tvp5150_vdp_init(struct v4l2_subdev *sd, | |||
604 | tvp5150_write(sd, i, 0xff); | 601 | tvp5150_write(sd, i, 0xff); |
605 | 602 | ||
606 | /* Load Ram Table */ | 603 | /* Load Ram Table */ |
607 | while (regs->reg != (u16)-1) { | 604 | for (j = 0; j < ARRAY_SIZE(vbi_ram_default); j++) { |
605 | const struct i2c_vbi_ram_value *regs = &vbi_ram_default[j]; | ||
606 | |||
607 | if (!regs->type.vbi_type) | ||
608 | continue; | ||
609 | |||
608 | tvp5150_write(sd, TVP5150_CONF_RAM_ADDR_HIGH, regs->reg >> 8); | 610 | tvp5150_write(sd, TVP5150_CONF_RAM_ADDR_HIGH, regs->reg >> 8); |
609 | tvp5150_write(sd, TVP5150_CONF_RAM_ADDR_LOW, regs->reg); | 611 | tvp5150_write(sd, TVP5150_CONF_RAM_ADDR_LOW, regs->reg); |
610 | 612 | ||
611 | for (i = 0; i < 16; i++) | 613 | for (i = 0; i < 16; i++) |
612 | tvp5150_write(sd, TVP5150_VDP_CONF_RAM_DATA, regs->values[i]); | 614 | tvp5150_write(sd, TVP5150_VDP_CONF_RAM_DATA, regs->values[i]); |
613 | |||
614 | regs++; | ||
615 | } | 615 | } |
616 | return 0; | 616 | return 0; |
617 | } | 617 | } |
@@ -620,19 +620,23 @@ static int tvp5150_vdp_init(struct v4l2_subdev *sd, | |||
620 | static int tvp5150_g_sliced_vbi_cap(struct v4l2_subdev *sd, | 620 | static int tvp5150_g_sliced_vbi_cap(struct v4l2_subdev *sd, |
621 | struct v4l2_sliced_vbi_cap *cap) | 621 | struct v4l2_sliced_vbi_cap *cap) |
622 | { | 622 | { |
623 | const struct i2c_vbi_ram_value *regs = vbi_ram_default; | 623 | int line, i; |
624 | int line; | ||
625 | 624 | ||
626 | dev_dbg_lvl(sd->dev, 1, debug, "g_sliced_vbi_cap\n"); | 625 | dev_dbg_lvl(sd->dev, 1, debug, "g_sliced_vbi_cap\n"); |
627 | memset(cap, 0, sizeof *cap); | 626 | memset(cap, 0, sizeof *cap); |
628 | 627 | ||
629 | while (regs->reg != (u16)-1 ) { | 628 | for (i = 0; i < ARRAY_SIZE(vbi_ram_default); i++) { |
630 | for (line=regs->type.ini_line;line<=regs->type.end_line;line++) { | 629 | const struct i2c_vbi_ram_value *regs = &vbi_ram_default[i]; |
630 | |||
631 | if (!regs->type.vbi_type) | ||
632 | continue; | ||
633 | |||
634 | for (line = regs->type.ini_line; | ||
635 | line <= regs->type.end_line; | ||
636 | line++) { | ||
631 | cap->service_lines[0][line] |= regs->type.vbi_type; | 637 | cap->service_lines[0][line] |= regs->type.vbi_type; |
632 | } | 638 | } |
633 | cap->service_set |= regs->type.vbi_type; | 639 | cap->service_set |= regs->type.vbi_type; |
634 | |||
635 | regs++; | ||
636 | } | 640 | } |
637 | return 0; | 641 | return 0; |
638 | } | 642 | } |
@@ -651,14 +655,13 @@ static int tvp5150_g_sliced_vbi_cap(struct v4l2_subdev *sd, | |||
651 | * MSB = field2 | 655 | * MSB = field2 |
652 | */ | 656 | */ |
653 | static int tvp5150_set_vbi(struct v4l2_subdev *sd, | 657 | static int tvp5150_set_vbi(struct v4l2_subdev *sd, |
654 | const struct i2c_vbi_ram_value *regs, | ||
655 | unsigned int type,u8 flags, int line, | 658 | unsigned int type,u8 flags, int line, |
656 | const int fields) | 659 | const int fields) |
657 | { | 660 | { |
658 | struct tvp5150 *decoder = to_tvp5150(sd); | 661 | struct tvp5150 *decoder = to_tvp5150(sd); |
659 | v4l2_std_id std = decoder->norm; | 662 | v4l2_std_id std = decoder->norm; |
660 | u8 reg; | 663 | u8 reg; |
661 | int pos = 0; | 664 | int i, pos = 0; |
662 | 665 | ||
663 | if (std == V4L2_STD_ALL) { | 666 | if (std == V4L2_STD_ALL) { |
664 | dev_err(sd->dev, "VBI can't be configured without knowing number of lines\n"); | 667 | dev_err(sd->dev, "VBI can't be configured without knowing number of lines\n"); |
@@ -671,19 +674,19 @@ static int tvp5150_set_vbi(struct v4l2_subdev *sd, | |||
671 | if (line < 6 || line > 27) | 674 | if (line < 6 || line > 27) |
672 | return 0; | 675 | return 0; |
673 | 676 | ||
674 | while (regs->reg != (u16)-1) { | 677 | for (i = 0; i < ARRAY_SIZE(vbi_ram_default); i++) { |
678 | const struct i2c_vbi_ram_value *regs = &vbi_ram_default[i]; | ||
679 | |||
680 | if (!regs->type.vbi_type) | ||
681 | continue; | ||
682 | |||
675 | if ((type & regs->type.vbi_type) && | 683 | if ((type & regs->type.vbi_type) && |
676 | (line >= regs->type.ini_line) && | 684 | (line >= regs->type.ini_line) && |
677 | (line <= regs->type.end_line)) | 685 | (line <= regs->type.end_line)) |
678 | break; | 686 | break; |
679 | |||
680 | regs++; | ||
681 | pos++; | 687 | pos++; |
682 | } | 688 | } |
683 | 689 | ||
684 | if (regs->reg == (u16)-1) | ||
685 | return 0; | ||
686 | |||
687 | type = pos | (flags & 0xf0); | 690 | type = pos | (flags & 0xf0); |
688 | reg = ((line - 6) << 1) + TVP5150_LINE_MODE_INI; | 691 | reg = ((line - 6) << 1) + TVP5150_LINE_MODE_INI; |
689 | 692 | ||
@@ -696,8 +699,7 @@ static int tvp5150_set_vbi(struct v4l2_subdev *sd, | |||
696 | return type; | 699 | return type; |
697 | } | 700 | } |
698 | 701 | ||
699 | static int tvp5150_get_vbi(struct v4l2_subdev *sd, | 702 | static int tvp5150_get_vbi(struct v4l2_subdev *sd, int line) |
700 | const struct i2c_vbi_ram_value *regs, int line) | ||
701 | { | 703 | { |
702 | struct tvp5150 *decoder = to_tvp5150(sd); | 704 | struct tvp5150 *decoder = to_tvp5150(sd); |
703 | v4l2_std_id std = decoder->norm; | 705 | v4l2_std_id std = decoder->norm; |
@@ -726,8 +728,8 @@ static int tvp5150_get_vbi(struct v4l2_subdev *sd, | |||
726 | return 0; | 728 | return 0; |
727 | } | 729 | } |
728 | pos = ret & 0x0f; | 730 | pos = ret & 0x0f; |
729 | if (pos < 0x0f) | 731 | if (pos < ARRAY_SIZE(vbi_ram_default)) |
730 | type |= regs[pos].type.vbi_type; | 732 | type |= vbi_ram_default[pos].type.vbi_type; |
731 | } | 733 | } |
732 | 734 | ||
733 | return type; | 735 | return type; |
@@ -788,7 +790,7 @@ static int tvp5150_reset(struct v4l2_subdev *sd, u32 val) | |||
788 | tvp5150_write_inittab(sd, tvp5150_init_default); | 790 | tvp5150_write_inittab(sd, tvp5150_init_default); |
789 | 791 | ||
790 | /* Initializes VDP registers */ | 792 | /* Initializes VDP registers */ |
791 | tvp5150_vdp_init(sd, vbi_ram_default); | 793 | tvp5150_vdp_init(sd); |
792 | 794 | ||
793 | /* Selects decoder input */ | 795 | /* Selects decoder input */ |
794 | tvp5150_selmux(sd); | 796 | tvp5150_selmux(sd); |
@@ -1121,8 +1123,8 @@ static int tvp5150_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_f | |||
1121 | for (i = 0; i <= 23; i++) { | 1123 | for (i = 0; i <= 23; i++) { |
1122 | svbi->service_lines[1][i] = 0; | 1124 | svbi->service_lines[1][i] = 0; |
1123 | svbi->service_lines[0][i] = | 1125 | svbi->service_lines[0][i] = |
1124 | tvp5150_set_vbi(sd, vbi_ram_default, | 1126 | tvp5150_set_vbi(sd, svbi->service_lines[0][i], |
1125 | svbi->service_lines[0][i], 0xf0, i, 3); | 1127 | 0xf0, i, 3); |
1126 | } | 1128 | } |
1127 | /* Enables FIFO */ | 1129 | /* Enables FIFO */ |
1128 | tvp5150_write(sd, TVP5150_FIFO_OUT_CTRL, 1); | 1130 | tvp5150_write(sd, TVP5150_FIFO_OUT_CTRL, 1); |
@@ -1148,7 +1150,7 @@ static int tvp5150_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_f | |||
1148 | 1150 | ||
1149 | for (i = 0; i <= 23; i++) { | 1151 | for (i = 0; i <= 23; i++) { |
1150 | svbi->service_lines[0][i] = | 1152 | svbi->service_lines[0][i] = |
1151 | tvp5150_get_vbi(sd, vbi_ram_default, i); | 1153 | tvp5150_get_vbi(sd, i); |
1152 | mask |= svbi->service_lines[0][i]; | 1154 | mask |= svbi->service_lines[0][i]; |
1153 | } | 1155 | } |
1154 | svbi->service_set = mask; | 1156 | svbi->service_set = mask; |
diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c index dc8e577b2f74..d6816effb878 100644 --- a/drivers/media/pci/ttpci/av7110.c +++ b/drivers/media/pci/ttpci/av7110.c | |||
@@ -324,14 +324,15 @@ static int DvbDmxFilterCallback(u8 *buffer1, size_t buffer1_len, | |||
324 | } | 324 | } |
325 | return dvbdmxfilter->feed->cb.sec(buffer1, buffer1_len, | 325 | return dvbdmxfilter->feed->cb.sec(buffer1, buffer1_len, |
326 | buffer2, buffer2_len, | 326 | buffer2, buffer2_len, |
327 | &dvbdmxfilter->filter); | 327 | &dvbdmxfilter->filter, NULL); |
328 | case DMX_TYPE_TS: | 328 | case DMX_TYPE_TS: |
329 | if (!(dvbdmxfilter->feed->ts_type & TS_PACKET)) | 329 | if (!(dvbdmxfilter->feed->ts_type & TS_PACKET)) |
330 | return 0; | 330 | return 0; |
331 | if (dvbdmxfilter->feed->ts_type & TS_PAYLOAD_ONLY) | 331 | if (dvbdmxfilter->feed->ts_type & TS_PAYLOAD_ONLY) |
332 | return dvbdmxfilter->feed->cb.ts(buffer1, buffer1_len, | 332 | return dvbdmxfilter->feed->cb.ts(buffer1, buffer1_len, |
333 | buffer2, buffer2_len, | 333 | buffer2, buffer2_len, |
334 | &dvbdmxfilter->feed->feed.ts); | 334 | &dvbdmxfilter->feed->feed.ts, |
335 | NULL); | ||
335 | else | 336 | else |
336 | av7110_p2t_write(buffer1, buffer1_len, | 337 | av7110_p2t_write(buffer1, buffer1_len, |
337 | dvbdmxfilter->feed->pid, | 338 | dvbdmxfilter->feed->pid, |
diff --git a/drivers/media/pci/ttpci/av7110_av.c b/drivers/media/pci/ttpci/av7110_av.c index 4daba76ec240..ef1bc17cdc4d 100644 --- a/drivers/media/pci/ttpci/av7110_av.c +++ b/drivers/media/pci/ttpci/av7110_av.c | |||
@@ -99,7 +99,7 @@ int av7110_record_cb(struct dvb_filter_pes2ts *p2t, u8 *buf, size_t len) | |||
99 | buf[4] = buf[5] = 0; | 99 | buf[4] = buf[5] = 0; |
100 | if (dvbdmxfeed->ts_type & TS_PAYLOAD_ONLY) | 100 | if (dvbdmxfeed->ts_type & TS_PAYLOAD_ONLY) |
101 | return dvbdmxfeed->cb.ts(buf, len, NULL, 0, | 101 | return dvbdmxfeed->cb.ts(buf, len, NULL, 0, |
102 | &dvbdmxfeed->feed.ts); | 102 | &dvbdmxfeed->feed.ts, NULL); |
103 | else | 103 | else |
104 | return dvb_filter_pes2ts(p2t, buf, len, 1); | 104 | return dvb_filter_pes2ts(p2t, buf, len, 1); |
105 | } | 105 | } |
@@ -109,7 +109,7 @@ static int dvb_filter_pes2ts_cb(void *priv, unsigned char *data) | |||
109 | struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *) priv; | 109 | struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *) priv; |
110 | 110 | ||
111 | dvbdmxfeed->cb.ts(data, 188, NULL, 0, | 111 | dvbdmxfeed->cb.ts(data, 188, NULL, 0, |
112 | &dvbdmxfeed->feed.ts); | 112 | &dvbdmxfeed->feed.ts, NULL); |
113 | return 0; | 113 | return 0; |
114 | } | 114 | } |
115 | 115 | ||
@@ -814,7 +814,7 @@ static void p_to_t(u8 const *buf, long int length, u16 pid, u8 *counter, | |||
814 | memcpy(obuf + l, buf + c, TS_SIZE - l); | 814 | memcpy(obuf + l, buf + c, TS_SIZE - l); |
815 | c = length; | 815 | c = length; |
816 | } | 816 | } |
817 | feed->cb.ts(obuf, 188, NULL, 0, &feed->feed.ts); | 817 | feed->cb.ts(obuf, 188, NULL, 0, &feed->feed.ts, NULL); |
818 | pes_start = 0; | 818 | pes_start = 0; |
819 | } | 819 | } |
820 | } | 820 | } |
diff --git a/drivers/media/usb/au0828/Kconfig b/drivers/media/usb/au0828/Kconfig index 70521e0b4c53..bfaa806633df 100644 --- a/drivers/media/usb/au0828/Kconfig +++ b/drivers/media/usb/au0828/Kconfig | |||
@@ -1,7 +1,7 @@ | |||
1 | 1 | ||
2 | config VIDEO_AU0828 | 2 | config VIDEO_AU0828 |
3 | tristate "Auvitek AU0828 support" | 3 | tristate "Auvitek AU0828 support" |
4 | depends on I2C && INPUT && DVB_CORE && USB | 4 | depends on I2C && INPUT && DVB_CORE && USB && VIDEO_V4L2 |
5 | select I2C_ALGOBIT | 5 | select I2C_ALGOBIT |
6 | select VIDEO_TVEEPROM | 6 | select VIDEO_TVEEPROM |
7 | select VIDEOBUF2_VMALLOC | 7 | select VIDEOBUF2_VMALLOC |
diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c index a8900f5571f7..44ca66cb9b8f 100644 --- a/drivers/media/usb/ttusb-dec/ttusb_dec.c +++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c | |||
@@ -428,7 +428,7 @@ static int ttusb_dec_audio_pes2ts_cb(void *priv, unsigned char *data) | |||
428 | struct ttusb_dec *dec = priv; | 428 | struct ttusb_dec *dec = priv; |
429 | 429 | ||
430 | dec->audio_filter->feed->cb.ts(data, 188, NULL, 0, | 430 | dec->audio_filter->feed->cb.ts(data, 188, NULL, 0, |
431 | &dec->audio_filter->feed->feed.ts); | 431 | &dec->audio_filter->feed->feed.ts, NULL); |
432 | 432 | ||
433 | return 0; | 433 | return 0; |
434 | } | 434 | } |
@@ -438,7 +438,7 @@ static int ttusb_dec_video_pes2ts_cb(void *priv, unsigned char *data) | |||
438 | struct ttusb_dec *dec = priv; | 438 | struct ttusb_dec *dec = priv; |
439 | 439 | ||
440 | dec->video_filter->feed->cb.ts(data, 188, NULL, 0, | 440 | dec->video_filter->feed->cb.ts(data, 188, NULL, 0, |
441 | &dec->video_filter->feed->feed.ts); | 441 | &dec->video_filter->feed->feed.ts, NULL); |
442 | 442 | ||
443 | return 0; | 443 | return 0; |
444 | } | 444 | } |
@@ -490,7 +490,7 @@ static void ttusb_dec_process_pva(struct ttusb_dec *dec, u8 *pva, int length) | |||
490 | 490 | ||
491 | if (output_pva) { | 491 | if (output_pva) { |
492 | dec->video_filter->feed->cb.ts(pva, length, NULL, 0, | 492 | dec->video_filter->feed->cb.ts(pva, length, NULL, 0, |
493 | &dec->video_filter->feed->feed.ts); | 493 | &dec->video_filter->feed->feed.ts, NULL); |
494 | return; | 494 | return; |
495 | } | 495 | } |
496 | 496 | ||
@@ -551,7 +551,7 @@ static void ttusb_dec_process_pva(struct ttusb_dec *dec, u8 *pva, int length) | |||
551 | case 0x02: /* MainAudioStream */ | 551 | case 0x02: /* MainAudioStream */ |
552 | if (output_pva) { | 552 | if (output_pva) { |
553 | dec->audio_filter->feed->cb.ts(pva, length, NULL, 0, | 553 | dec->audio_filter->feed->cb.ts(pva, length, NULL, 0, |
554 | &dec->audio_filter->feed->feed.ts); | 554 | &dec->audio_filter->feed->feed.ts, NULL); |
555 | return; | 555 | return; |
556 | } | 556 | } |
557 | 557 | ||
@@ -589,7 +589,7 @@ static void ttusb_dec_process_filter(struct ttusb_dec *dec, u8 *packet, | |||
589 | 589 | ||
590 | if (filter) | 590 | if (filter) |
591 | filter->feed->cb.sec(&packet[2], length - 2, NULL, 0, | 591 | filter->feed->cb.sec(&packet[2], length - 2, NULL, 0, |
592 | &filter->filter); | 592 | &filter->filter, NULL); |
593 | } | 593 | } |
594 | 594 | ||
595 | static void ttusb_dec_process_packet(struct ttusb_dec *dec) | 595 | static void ttusb_dec_process_packet(struct ttusb_dec *dec) |
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig index bf52fbd07aed..8e37e7c5e0f7 100644 --- a/drivers/media/v4l2-core/Kconfig +++ b/drivers/media/v4l2-core/Kconfig | |||
@@ -7,6 +7,7 @@ config VIDEO_V4L2 | |||
7 | tristate | 7 | tristate |
8 | depends on (I2C || I2C=n) && VIDEO_DEV | 8 | depends on (I2C || I2C=n) && VIDEO_DEV |
9 | select RATIONAL | 9 | select RATIONAL |
10 | select VIDEOBUF2_V4L2 if VIDEOBUF2_CORE | ||
10 | default (I2C || I2C=n) && VIDEO_DEV | 11 | default (I2C || I2C=n) && VIDEO_DEV |
11 | 12 | ||
12 | config VIDEO_ADV_DEBUG | 13 | config VIDEO_ADV_DEBUG |
diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile index 80de2cb9c476..7df54582e956 100644 --- a/drivers/media/v4l2-core/Makefile +++ b/drivers/media/v4l2-core/Makefile | |||
@@ -13,7 +13,7 @@ ifeq ($(CONFIG_COMPAT),y) | |||
13 | endif | 13 | endif |
14 | obj-$(CONFIG_V4L2_FWNODE) += v4l2-fwnode.o | 14 | obj-$(CONFIG_V4L2_FWNODE) += v4l2-fwnode.o |
15 | ifeq ($(CONFIG_TRACEPOINTS),y) | 15 | ifeq ($(CONFIG_TRACEPOINTS),y) |
16 | videodev-objs += vb2-trace.o v4l2-trace.o | 16 | videodev-objs += v4l2-trace.o |
17 | endif | 17 | endif |
18 | videodev-$(CONFIG_MEDIA_CONTROLLER) += v4l2-mc.o | 18 | videodev-$(CONFIG_MEDIA_CONTROLLER) += v4l2-mc.o |
19 | 19 | ||
@@ -35,4 +35,3 @@ obj-$(CONFIG_VIDEOBUF_DVB) += videobuf-dvb.o | |||
35 | 35 | ||
36 | ccflags-y += -I$(srctree)/drivers/media/dvb-frontends | 36 | ccflags-y += -I$(srctree)/drivers/media/dvb-frontends |
37 | ccflags-y += -I$(srctree)/drivers/media/tuners | 37 | ccflags-y += -I$(srctree)/drivers/media/tuners |
38 | |||
diff --git a/drivers/memory/brcmstb_dpfe.c b/drivers/memory/brcmstb_dpfe.c index 0a7bdbed3a6f..e9c1485c32b9 100644 --- a/drivers/memory/brcmstb_dpfe.c +++ b/drivers/memory/brcmstb_dpfe.c | |||
@@ -45,8 +45,16 @@ | |||
45 | #define REG_TO_DCPU_MBOX 0x10 | 45 | #define REG_TO_DCPU_MBOX 0x10 |
46 | #define REG_TO_HOST_MBOX 0x14 | 46 | #define REG_TO_HOST_MBOX 0x14 |
47 | 47 | ||
48 | /* Macros to process offsets returned by the DCPU */ | ||
49 | #define DRAM_MSG_ADDR_OFFSET 0x0 | ||
50 | #define DRAM_MSG_TYPE_OFFSET 0x1c | ||
51 | #define DRAM_MSG_ADDR_MASK ((1UL << DRAM_MSG_TYPE_OFFSET) - 1) | ||
52 | #define DRAM_MSG_TYPE_MASK ((1UL << \ | ||
53 | (BITS_PER_LONG - DRAM_MSG_TYPE_OFFSET)) - 1) | ||
54 | |||
48 | /* Message RAM */ | 55 | /* Message RAM */ |
49 | #define DCPU_MSG_RAM(x) (0x100 + (x) * sizeof(u32)) | 56 | #define DCPU_MSG_RAM_START 0x100 |
57 | #define DCPU_MSG_RAM(x) (DCPU_MSG_RAM_START + (x) * sizeof(u32)) | ||
50 | 58 | ||
51 | /* DRAM Info Offsets & Masks */ | 59 | /* DRAM Info Offsets & Masks */ |
52 | #define DRAM_INFO_INTERVAL 0x0 | 60 | #define DRAM_INFO_INTERVAL 0x0 |
@@ -255,6 +263,40 @@ static unsigned int get_msg_chksum(const u32 msg[]) | |||
255 | return sum; | 263 | return sum; |
256 | } | 264 | } |
257 | 265 | ||
266 | static void __iomem *get_msg_ptr(struct private_data *priv, u32 response, | ||
267 | char *buf, ssize_t *size) | ||
268 | { | ||
269 | unsigned int msg_type; | ||
270 | unsigned int offset; | ||
271 | void __iomem *ptr = NULL; | ||
272 | |||
273 | msg_type = (response >> DRAM_MSG_TYPE_OFFSET) & DRAM_MSG_TYPE_MASK; | ||
274 | offset = (response >> DRAM_MSG_ADDR_OFFSET) & DRAM_MSG_ADDR_MASK; | ||
275 | |||
276 | /* | ||
277 | * msg_type == 1: the offset is relative to the message RAM | ||
278 | * msg_type == 0: the offset is relative to the data RAM (this is the | ||
279 | * previous way of passing data) | ||
280 | * msg_type is anything else: there's critical hardware problem | ||
281 | */ | ||
282 | switch (msg_type) { | ||
283 | case 1: | ||
284 | ptr = priv->regs + DCPU_MSG_RAM_START + offset; | ||
285 | break; | ||
286 | case 0: | ||
287 | ptr = priv->dmem + offset; | ||
288 | break; | ||
289 | default: | ||
290 | dev_emerg(priv->dev, "invalid message reply from DCPU: %#x\n", | ||
291 | response); | ||
292 | if (buf && size) | ||
293 | *size = sprintf(buf, | ||
294 | "FATAL: communication error with DCPU\n"); | ||
295 | } | ||
296 | |||
297 | return ptr; | ||
298 | } | ||
299 | |||
258 | static int __send_command(struct private_data *priv, unsigned int cmd, | 300 | static int __send_command(struct private_data *priv, unsigned int cmd, |
259 | u32 result[]) | 301 | u32 result[]) |
260 | { | 302 | { |
@@ -507,7 +549,7 @@ static ssize_t show_info(struct device *dev, struct device_attribute *devattr, | |||
507 | { | 549 | { |
508 | u32 response[MSG_FIELD_MAX]; | 550 | u32 response[MSG_FIELD_MAX]; |
509 | unsigned int info; | 551 | unsigned int info; |
510 | int ret; | 552 | ssize_t ret; |
511 | 553 | ||
512 | ret = generic_show(DPFE_CMD_GET_INFO, response, dev, buf); | 554 | ret = generic_show(DPFE_CMD_GET_INFO, response, dev, buf); |
513 | if (ret) | 555 | if (ret) |
@@ -528,18 +570,19 @@ static ssize_t show_refresh(struct device *dev, | |||
528 | u32 response[MSG_FIELD_MAX]; | 570 | u32 response[MSG_FIELD_MAX]; |
529 | void __iomem *info; | 571 | void __iomem *info; |
530 | struct private_data *priv; | 572 | struct private_data *priv; |
531 | unsigned int offset; | ||
532 | u8 refresh, sr_abort, ppre, thermal_offs, tuf; | 573 | u8 refresh, sr_abort, ppre, thermal_offs, tuf; |
533 | u32 mr4; | 574 | u32 mr4; |
534 | int ret; | 575 | ssize_t ret; |
535 | 576 | ||
536 | ret = generic_show(DPFE_CMD_GET_REFRESH, response, dev, buf); | 577 | ret = generic_show(DPFE_CMD_GET_REFRESH, response, dev, buf); |
537 | if (ret) | 578 | if (ret) |
538 | return ret; | 579 | return ret; |
539 | 580 | ||
540 | priv = dev_get_drvdata(dev); | 581 | priv = dev_get_drvdata(dev); |
541 | offset = response[MSG_ARG0]; | 582 | |
542 | info = priv->dmem + offset; | 583 | info = get_msg_ptr(priv, response[MSG_ARG0], buf, &ret); |
584 | if (!info) | ||
585 | return ret; | ||
543 | 586 | ||
544 | mr4 = readl_relaxed(info + DRAM_INFO_MR4) & DRAM_INFO_MR4_MASK; | 587 | mr4 = readl_relaxed(info + DRAM_INFO_MR4) & DRAM_INFO_MR4_MASK; |
545 | 588 | ||
@@ -561,7 +604,6 @@ static ssize_t store_refresh(struct device *dev, struct device_attribute *attr, | |||
561 | u32 response[MSG_FIELD_MAX]; | 604 | u32 response[MSG_FIELD_MAX]; |
562 | struct private_data *priv; | 605 | struct private_data *priv; |
563 | void __iomem *info; | 606 | void __iomem *info; |
564 | unsigned int offset; | ||
565 | unsigned long val; | 607 | unsigned long val; |
566 | int ret; | 608 | int ret; |
567 | 609 | ||
@@ -574,8 +616,10 @@ static ssize_t store_refresh(struct device *dev, struct device_attribute *attr, | |||
574 | if (ret) | 616 | if (ret) |
575 | return ret; | 617 | return ret; |
576 | 618 | ||
577 | offset = response[MSG_ARG0]; | 619 | info = get_msg_ptr(priv, response[MSG_ARG0], NULL, NULL); |
578 | info = priv->dmem + offset; | 620 | if (!info) |
621 | return -EIO; | ||
622 | |||
579 | writel_relaxed(val, info + DRAM_INFO_INTERVAL); | 623 | writel_relaxed(val, info + DRAM_INFO_INTERVAL); |
580 | 624 | ||
581 | return count; | 625 | return count; |
@@ -587,23 +631,25 @@ static ssize_t show_vendor(struct device *dev, struct device_attribute *devattr, | |||
587 | u32 response[MSG_FIELD_MAX]; | 631 | u32 response[MSG_FIELD_MAX]; |
588 | struct private_data *priv; | 632 | struct private_data *priv; |
589 | void __iomem *info; | 633 | void __iomem *info; |
590 | unsigned int offset; | 634 | ssize_t ret; |
591 | int ret; | ||
592 | 635 | ||
593 | ret = generic_show(DPFE_CMD_GET_VENDOR, response, dev, buf); | 636 | ret = generic_show(DPFE_CMD_GET_VENDOR, response, dev, buf); |
594 | if (ret) | 637 | if (ret) |
595 | return ret; | 638 | return ret; |
596 | 639 | ||
597 | offset = response[MSG_ARG0]; | ||
598 | priv = dev_get_drvdata(dev); | 640 | priv = dev_get_drvdata(dev); |
599 | info = priv->dmem + offset; | 641 | |
642 | info = get_msg_ptr(priv, response[MSG_ARG0], buf, &ret); | ||
643 | if (!info) | ||
644 | return ret; | ||
600 | 645 | ||
601 | return sprintf(buf, "%#x %#x %#x %#x %#x\n", | 646 | return sprintf(buf, "%#x %#x %#x %#x %#x\n", |
602 | readl_relaxed(info + DRAM_VENDOR_MR5) & DRAM_VENDOR_MASK, | 647 | readl_relaxed(info + DRAM_VENDOR_MR5) & DRAM_VENDOR_MASK, |
603 | readl_relaxed(info + DRAM_VENDOR_MR6) & DRAM_VENDOR_MASK, | 648 | readl_relaxed(info + DRAM_VENDOR_MR6) & DRAM_VENDOR_MASK, |
604 | readl_relaxed(info + DRAM_VENDOR_MR7) & DRAM_VENDOR_MASK, | 649 | readl_relaxed(info + DRAM_VENDOR_MR7) & DRAM_VENDOR_MASK, |
605 | readl_relaxed(info + DRAM_VENDOR_MR8) & DRAM_VENDOR_MASK, | 650 | readl_relaxed(info + DRAM_VENDOR_MR8) & DRAM_VENDOR_MASK, |
606 | readl_relaxed(info + DRAM_VENDOR_ERROR)); | 651 | readl_relaxed(info + DRAM_VENDOR_ERROR) & |
652 | DRAM_VENDOR_MASK); | ||
607 | } | 653 | } |
608 | 654 | ||
609 | static int brcmstb_dpfe_resume(struct platform_device *pdev) | 655 | static int brcmstb_dpfe_resume(struct platform_device *pdev) |
diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c index 337462e1569f..038509e5d031 100644 --- a/drivers/misc/ocxl/file.c +++ b/drivers/misc/ocxl/file.c | |||
@@ -102,10 +102,32 @@ static long afu_ioctl_attach(struct ocxl_context *ctx, | |||
102 | return rc; | 102 | return rc; |
103 | } | 103 | } |
104 | 104 | ||
105 | static long afu_ioctl_get_metadata(struct ocxl_context *ctx, | ||
106 | struct ocxl_ioctl_metadata __user *uarg) | ||
107 | { | ||
108 | struct ocxl_ioctl_metadata arg; | ||
109 | |||
110 | memset(&arg, 0, sizeof(arg)); | ||
111 | |||
112 | arg.version = 0; | ||
113 | |||
114 | arg.afu_version_major = ctx->afu->config.version_major; | ||
115 | arg.afu_version_minor = ctx->afu->config.version_minor; | ||
116 | arg.pasid = ctx->pasid; | ||
117 | arg.pp_mmio_size = ctx->afu->config.pp_mmio_stride; | ||
118 | arg.global_mmio_size = ctx->afu->config.global_mmio_size; | ||
119 | |||
120 | if (copy_to_user(uarg, &arg, sizeof(arg))) | ||
121 | return -EFAULT; | ||
122 | |||
123 | return 0; | ||
124 | } | ||
125 | |||
105 | #define CMD_STR(x) (x == OCXL_IOCTL_ATTACH ? "ATTACH" : \ | 126 | #define CMD_STR(x) (x == OCXL_IOCTL_ATTACH ? "ATTACH" : \ |
106 | x == OCXL_IOCTL_IRQ_ALLOC ? "IRQ_ALLOC" : \ | 127 | x == OCXL_IOCTL_IRQ_ALLOC ? "IRQ_ALLOC" : \ |
107 | x == OCXL_IOCTL_IRQ_FREE ? "IRQ_FREE" : \ | 128 | x == OCXL_IOCTL_IRQ_FREE ? "IRQ_FREE" : \ |
108 | x == OCXL_IOCTL_IRQ_SET_FD ? "IRQ_SET_FD" : \ | 129 | x == OCXL_IOCTL_IRQ_SET_FD ? "IRQ_SET_FD" : \ |
130 | x == OCXL_IOCTL_GET_METADATA ? "GET_METADATA" : \ | ||
109 | "UNKNOWN") | 131 | "UNKNOWN") |
110 | 132 | ||
111 | static long afu_ioctl(struct file *file, unsigned int cmd, | 133 | static long afu_ioctl(struct file *file, unsigned int cmd, |
@@ -159,6 +181,11 @@ static long afu_ioctl(struct file *file, unsigned int cmd, | |||
159 | irq_fd.eventfd); | 181 | irq_fd.eventfd); |
160 | break; | 182 | break; |
161 | 183 | ||
184 | case OCXL_IOCTL_GET_METADATA: | ||
185 | rc = afu_ioctl_get_metadata(ctx, | ||
186 | (struct ocxl_ioctl_metadata __user *) args); | ||
187 | break; | ||
188 | |||
162 | default: | 189 | default: |
163 | rc = -EINVAL; | 190 | rc = -EINVAL; |
164 | } | 191 | } |
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index 908e4db03535..42d6aa89a48a 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c | |||
@@ -848,7 +848,6 @@ int mmc_interrupt_hpi(struct mmc_card *card) | |||
848 | return 1; | 848 | return 1; |
849 | } | 849 | } |
850 | 850 | ||
851 | mmc_claim_host(card->host); | ||
852 | err = mmc_send_status(card, &status); | 851 | err = mmc_send_status(card, &status); |
853 | if (err) { | 852 | if (err) { |
854 | pr_err("%s: Get card status fail\n", mmc_hostname(card->host)); | 853 | pr_err("%s: Get card status fail\n", mmc_hostname(card->host)); |
@@ -890,7 +889,6 @@ int mmc_interrupt_hpi(struct mmc_card *card) | |||
890 | } while (!err); | 889 | } while (!err); |
891 | 890 | ||
892 | out: | 891 | out: |
893 | mmc_release_host(card->host); | ||
894 | return err; | 892 | return err; |
895 | } | 893 | } |
896 | 894 | ||
@@ -932,9 +930,7 @@ static int mmc_read_bkops_status(struct mmc_card *card) | |||
932 | int err; | 930 | int err; |
933 | u8 *ext_csd; | 931 | u8 *ext_csd; |
934 | 932 | ||
935 | mmc_claim_host(card->host); | ||
936 | err = mmc_get_ext_csd(card, &ext_csd); | 933 | err = mmc_get_ext_csd(card, &ext_csd); |
937 | mmc_release_host(card->host); | ||
938 | if (err) | 934 | if (err) |
939 | return err; | 935 | return err; |
940 | 936 | ||
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c index 35026795be28..fa41d9422d57 100644 --- a/drivers/mmc/host/dw_mmc-exynos.c +++ b/drivers/mmc/host/dw_mmc-exynos.c | |||
@@ -487,6 +487,7 @@ static unsigned long exynos_dwmmc_caps[4] = { | |||
487 | 487 | ||
488 | static const struct dw_mci_drv_data exynos_drv_data = { | 488 | static const struct dw_mci_drv_data exynos_drv_data = { |
489 | .caps = exynos_dwmmc_caps, | 489 | .caps = exynos_dwmmc_caps, |
490 | .num_caps = ARRAY_SIZE(exynos_dwmmc_caps), | ||
490 | .init = dw_mci_exynos_priv_init, | 491 | .init = dw_mci_exynos_priv_init, |
491 | .set_ios = dw_mci_exynos_set_ios, | 492 | .set_ios = dw_mci_exynos_set_ios, |
492 | .parse_dt = dw_mci_exynos_parse_dt, | 493 | .parse_dt = dw_mci_exynos_parse_dt, |
diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c index 73fd75c3c824..89cdb3d533bb 100644 --- a/drivers/mmc/host/dw_mmc-k3.c +++ b/drivers/mmc/host/dw_mmc-k3.c | |||
@@ -135,6 +135,9 @@ static int dw_mci_hi6220_parse_dt(struct dw_mci *host) | |||
135 | if (priv->ctrl_id < 0) | 135 | if (priv->ctrl_id < 0) |
136 | priv->ctrl_id = 0; | 136 | priv->ctrl_id = 0; |
137 | 137 | ||
138 | if (priv->ctrl_id >= TIMING_MODE) | ||
139 | return -EINVAL; | ||
140 | |||
138 | host->priv = priv; | 141 | host->priv = priv; |
139 | return 0; | 142 | return 0; |
140 | } | 143 | } |
@@ -207,6 +210,7 @@ static int dw_mci_hi6220_execute_tuning(struct dw_mci_slot *slot, u32 opcode) | |||
207 | 210 | ||
208 | static const struct dw_mci_drv_data hi6220_data = { | 211 | static const struct dw_mci_drv_data hi6220_data = { |
209 | .caps = dw_mci_hi6220_caps, | 212 | .caps = dw_mci_hi6220_caps, |
213 | .num_caps = ARRAY_SIZE(dw_mci_hi6220_caps), | ||
210 | .switch_voltage = dw_mci_hi6220_switch_voltage, | 214 | .switch_voltage = dw_mci_hi6220_switch_voltage, |
211 | .set_ios = dw_mci_hi6220_set_ios, | 215 | .set_ios = dw_mci_hi6220_set_ios, |
212 | .parse_dt = dw_mci_hi6220_parse_dt, | 216 | .parse_dt = dw_mci_hi6220_parse_dt, |
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c index a3f1c2b30145..339295212935 100644 --- a/drivers/mmc/host/dw_mmc-rockchip.c +++ b/drivers/mmc/host/dw_mmc-rockchip.c | |||
@@ -319,6 +319,7 @@ static const struct dw_mci_drv_data rk2928_drv_data = { | |||
319 | 319 | ||
320 | static const struct dw_mci_drv_data rk3288_drv_data = { | 320 | static const struct dw_mci_drv_data rk3288_drv_data = { |
321 | .caps = dw_mci_rk3288_dwmmc_caps, | 321 | .caps = dw_mci_rk3288_dwmmc_caps, |
322 | .num_caps = ARRAY_SIZE(dw_mci_rk3288_dwmmc_caps), | ||
322 | .set_ios = dw_mci_rk3288_set_ios, | 323 | .set_ios = dw_mci_rk3288_set_ios, |
323 | .execute_tuning = dw_mci_rk3288_execute_tuning, | 324 | .execute_tuning = dw_mci_rk3288_execute_tuning, |
324 | .parse_dt = dw_mci_rk3288_parse_dt, | 325 | .parse_dt = dw_mci_rk3288_parse_dt, |
diff --git a/drivers/mmc/host/dw_mmc-zx.c b/drivers/mmc/host/dw_mmc-zx.c index d38e94ae2b85..c06b5393312f 100644 --- a/drivers/mmc/host/dw_mmc-zx.c +++ b/drivers/mmc/host/dw_mmc-zx.c | |||
@@ -195,6 +195,7 @@ static unsigned long zx_dwmmc_caps[3] = { | |||
195 | 195 | ||
196 | static const struct dw_mci_drv_data zx_drv_data = { | 196 | static const struct dw_mci_drv_data zx_drv_data = { |
197 | .caps = zx_dwmmc_caps, | 197 | .caps = zx_dwmmc_caps, |
198 | .num_caps = ARRAY_SIZE(zx_dwmmc_caps), | ||
198 | .execute_tuning = dw_mci_zx_execute_tuning, | 199 | .execute_tuning = dw_mci_zx_execute_tuning, |
199 | .prepare_hs400_tuning = dw_mci_zx_prepare_hs400_tuning, | 200 | .prepare_hs400_tuning = dw_mci_zx_prepare_hs400_tuning, |
200 | .parse_dt = dw_mci_zx_parse_dt, | 201 | .parse_dt = dw_mci_zx_parse_dt, |
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 0aa39975f33b..d9b4acefed31 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c | |||
@@ -165,6 +165,8 @@ static int dw_mci_regs_show(struct seq_file *s, void *v) | |||
165 | { | 165 | { |
166 | struct dw_mci *host = s->private; | 166 | struct dw_mci *host = s->private; |
167 | 167 | ||
168 | pm_runtime_get_sync(host->dev); | ||
169 | |||
168 | seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS)); | 170 | seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS)); |
169 | seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS)); | 171 | seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS)); |
170 | seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD)); | 172 | seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD)); |
@@ -172,6 +174,8 @@ static int dw_mci_regs_show(struct seq_file *s, void *v) | |||
172 | seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK)); | 174 | seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK)); |
173 | seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA)); | 175 | seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA)); |
174 | 176 | ||
177 | pm_runtime_put_autosuspend(host->dev); | ||
178 | |||
175 | return 0; | 179 | return 0; |
176 | } | 180 | } |
177 | 181 | ||
@@ -2778,12 +2782,57 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) | |||
2778 | return IRQ_HANDLED; | 2782 | return IRQ_HANDLED; |
2779 | } | 2783 | } |
2780 | 2784 | ||
2785 | static int dw_mci_init_slot_caps(struct dw_mci_slot *slot) | ||
2786 | { | ||
2787 | struct dw_mci *host = slot->host; | ||
2788 | const struct dw_mci_drv_data *drv_data = host->drv_data; | ||
2789 | struct mmc_host *mmc = slot->mmc; | ||
2790 | int ctrl_id; | ||
2791 | |||
2792 | if (host->pdata->caps) | ||
2793 | mmc->caps = host->pdata->caps; | ||
2794 | |||
2795 | /* | ||
2796 | * Support MMC_CAP_ERASE by default. | ||
2797 | * It needs to use trim/discard/erase commands. | ||
2798 | */ | ||
2799 | mmc->caps |= MMC_CAP_ERASE; | ||
2800 | |||
2801 | if (host->pdata->pm_caps) | ||
2802 | mmc->pm_caps = host->pdata->pm_caps; | ||
2803 | |||
2804 | if (host->dev->of_node) { | ||
2805 | ctrl_id = of_alias_get_id(host->dev->of_node, "mshc"); | ||
2806 | if (ctrl_id < 0) | ||
2807 | ctrl_id = 0; | ||
2808 | } else { | ||
2809 | ctrl_id = to_platform_device(host->dev)->id; | ||
2810 | } | ||
2811 | |||
2812 | if (drv_data && drv_data->caps) { | ||
2813 | if (ctrl_id >= drv_data->num_caps) { | ||
2814 | dev_err(host->dev, "invalid controller id %d\n", | ||
2815 | ctrl_id); | ||
2816 | return -EINVAL; | ||
2817 | } | ||
2818 | mmc->caps |= drv_data->caps[ctrl_id]; | ||
2819 | } | ||
2820 | |||
2821 | if (host->pdata->caps2) | ||
2822 | mmc->caps2 = host->pdata->caps2; | ||
2823 | |||
2824 | /* Process SDIO IRQs through the sdio_irq_work. */ | ||
2825 | if (mmc->caps & MMC_CAP_SDIO_IRQ) | ||
2826 | mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; | ||
2827 | |||
2828 | return 0; | ||
2829 | } | ||
2830 | |||
2781 | static int dw_mci_init_slot(struct dw_mci *host) | 2831 | static int dw_mci_init_slot(struct dw_mci *host) |
2782 | { | 2832 | { |
2783 | struct mmc_host *mmc; | 2833 | struct mmc_host *mmc; |
2784 | struct dw_mci_slot *slot; | 2834 | struct dw_mci_slot *slot; |
2785 | const struct dw_mci_drv_data *drv_data = host->drv_data; | 2835 | int ret; |
2786 | int ctrl_id, ret; | ||
2787 | u32 freq[2]; | 2836 | u32 freq[2]; |
2788 | 2837 | ||
2789 | mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev); | 2838 | mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev); |
@@ -2817,38 +2866,13 @@ static int dw_mci_init_slot(struct dw_mci *host) | |||
2817 | if (!mmc->ocr_avail) | 2866 | if (!mmc->ocr_avail) |
2818 | mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; | 2867 | mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; |
2819 | 2868 | ||
2820 | if (host->pdata->caps) | ||
2821 | mmc->caps = host->pdata->caps; | ||
2822 | |||
2823 | /* | ||
2824 | * Support MMC_CAP_ERASE by default. | ||
2825 | * It needs to use trim/discard/erase commands. | ||
2826 | */ | ||
2827 | mmc->caps |= MMC_CAP_ERASE; | ||
2828 | |||
2829 | if (host->pdata->pm_caps) | ||
2830 | mmc->pm_caps = host->pdata->pm_caps; | ||
2831 | |||
2832 | if (host->dev->of_node) { | ||
2833 | ctrl_id = of_alias_get_id(host->dev->of_node, "mshc"); | ||
2834 | if (ctrl_id < 0) | ||
2835 | ctrl_id = 0; | ||
2836 | } else { | ||
2837 | ctrl_id = to_platform_device(host->dev)->id; | ||
2838 | } | ||
2839 | if (drv_data && drv_data->caps) | ||
2840 | mmc->caps |= drv_data->caps[ctrl_id]; | ||
2841 | |||
2842 | if (host->pdata->caps2) | ||
2843 | mmc->caps2 = host->pdata->caps2; | ||
2844 | |||
2845 | ret = mmc_of_parse(mmc); | 2869 | ret = mmc_of_parse(mmc); |
2846 | if (ret) | 2870 | if (ret) |
2847 | goto err_host_allocated; | 2871 | goto err_host_allocated; |
2848 | 2872 | ||
2849 | /* Process SDIO IRQs through the sdio_irq_work. */ | 2873 | ret = dw_mci_init_slot_caps(slot); |
2850 | if (mmc->caps & MMC_CAP_SDIO_IRQ) | 2874 | if (ret) |
2851 | mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; | 2875 | goto err_host_allocated; |
2852 | 2876 | ||
2853 | /* Useful defaults if platform data is unset. */ | 2877 | /* Useful defaults if platform data is unset. */ |
2854 | if (host->use_dma == TRANS_MODE_IDMAC) { | 2878 | if (host->use_dma == TRANS_MODE_IDMAC) { |
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h index e3124f06a47e..1424bd490dd1 100644 --- a/drivers/mmc/host/dw_mmc.h +++ b/drivers/mmc/host/dw_mmc.h | |||
@@ -543,6 +543,7 @@ struct dw_mci_slot { | |||
543 | /** | 543 | /** |
544 | * dw_mci driver data - dw-mshc implementation specific driver data. | 544 | * dw_mci driver data - dw-mshc implementation specific driver data. |
545 | * @caps: mmc subsystem specified capabilities of the controller(s). | 545 | * @caps: mmc subsystem specified capabilities of the controller(s). |
546 | * @num_caps: number of capabilities specified by @caps. | ||
546 | * @init: early implementation specific initialization. | 547 | * @init: early implementation specific initialization. |
547 | * @set_ios: handle bus specific extensions. | 548 | * @set_ios: handle bus specific extensions. |
548 | * @parse_dt: parse implementation specific device tree properties. | 549 | * @parse_dt: parse implementation specific device tree properties. |
@@ -554,6 +555,7 @@ struct dw_mci_slot { | |||
554 | */ | 555 | */ |
555 | struct dw_mci_drv_data { | 556 | struct dw_mci_drv_data { |
556 | unsigned long *caps; | 557 | unsigned long *caps; |
558 | u32 num_caps; | ||
557 | int (*init)(struct dw_mci *host); | 559 | int (*init)(struct dw_mci *host); |
558 | void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios); | 560 | void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios); |
559 | int (*parse_dt)(struct dw_mci *host); | 561 | int (*parse_dt)(struct dw_mci *host); |
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 6d1a983e6227..82c4f05f91d8 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c | |||
@@ -654,9 +654,36 @@ static void byt_read_dsm(struct sdhci_pci_slot *slot) | |||
654 | slot->chip->rpm_retune = intel_host->d3_retune; | 654 | slot->chip->rpm_retune = intel_host->d3_retune; |
655 | } | 655 | } |
656 | 656 | ||
657 | static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) | 657 | static int intel_execute_tuning(struct mmc_host *mmc, u32 opcode) |
658 | { | ||
659 | int err = sdhci_execute_tuning(mmc, opcode); | ||
660 | struct sdhci_host *host = mmc_priv(mmc); | ||
661 | |||
662 | if (err) | ||
663 | return err; | ||
664 | |||
665 | /* | ||
666 | * Tuning can leave the IP in an active state (Buffer Read Enable bit | ||
667 | * set) which prevents the entry to low power states (i.e. S0i3). Data | ||
668 | * reset will clear it. | ||
669 | */ | ||
670 | sdhci_reset(host, SDHCI_RESET_DATA); | ||
671 | |||
672 | return 0; | ||
673 | } | ||
674 | |||
675 | static void byt_probe_slot(struct sdhci_pci_slot *slot) | ||
658 | { | 676 | { |
677 | struct mmc_host_ops *ops = &slot->host->mmc_host_ops; | ||
678 | |||
659 | byt_read_dsm(slot); | 679 | byt_read_dsm(slot); |
680 | |||
681 | ops->execute_tuning = intel_execute_tuning; | ||
682 | } | ||
683 | |||
684 | static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) | ||
685 | { | ||
686 | byt_probe_slot(slot); | ||
660 | slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | | 687 | slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | |
661 | MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR | | 688 | MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR | |
662 | MMC_CAP_CMD_DURING_TFR | | 689 | MMC_CAP_CMD_DURING_TFR | |
@@ -779,7 +806,7 @@ static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot) | |||
779 | { | 806 | { |
780 | int err; | 807 | int err; |
781 | 808 | ||
782 | byt_read_dsm(slot); | 809 | byt_probe_slot(slot); |
783 | 810 | ||
784 | err = ni_set_max_freq(slot); | 811 | err = ni_set_max_freq(slot); |
785 | if (err) | 812 | if (err) |
@@ -792,7 +819,7 @@ static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot) | |||
792 | 819 | ||
793 | static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot) | 820 | static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot) |
794 | { | 821 | { |
795 | byt_read_dsm(slot); | 822 | byt_probe_slot(slot); |
796 | slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE | | 823 | slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE | |
797 | MMC_CAP_WAIT_WHILE_BUSY; | 824 | MMC_CAP_WAIT_WHILE_BUSY; |
798 | return 0; | 825 | return 0; |
@@ -800,7 +827,7 @@ static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot) | |||
800 | 827 | ||
801 | static int byt_sd_probe_slot(struct sdhci_pci_slot *slot) | 828 | static int byt_sd_probe_slot(struct sdhci_pci_slot *slot) |
802 | { | 829 | { |
803 | byt_read_dsm(slot); | 830 | byt_probe_slot(slot); |
804 | slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | | 831 | slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | |
805 | MMC_CAP_AGGRESSIVE_PM | MMC_CAP_CD_WAKE; | 832 | MMC_CAP_AGGRESSIVE_PM | MMC_CAP_CD_WAKE; |
806 | slot->cd_idx = 0; | 833 | slot->cd_idx = 0; |
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index f5c87bd35fa1..f27f9bae1a4a 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c | |||
@@ -3063,9 +3063,6 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb) | |||
3063 | if (ndev->features & NETIF_F_RXCSUM) | 3063 | if (ndev->features & NETIF_F_RXCSUM) |
3064 | gfar_rx_checksum(skb, fcb); | 3064 | gfar_rx_checksum(skb, fcb); |
3065 | 3065 | ||
3066 | /* Tell the skb what kind of packet this is */ | ||
3067 | skb->protocol = eth_type_trans(skb, ndev); | ||
3068 | |||
3069 | /* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here. | 3066 | /* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here. |
3070 | * Even if vlan rx accel is disabled, on some chips | 3067 | * Even if vlan rx accel is disabled, on some chips |
3071 | * RXFCB_VLN is pseudo randomly set. | 3068 | * RXFCB_VLN is pseudo randomly set. |
@@ -3136,13 +3133,15 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) | |||
3136 | continue; | 3133 | continue; |
3137 | } | 3134 | } |
3138 | 3135 | ||
3136 | gfar_process_frame(ndev, skb); | ||
3137 | |||
3139 | /* Increment the number of packets */ | 3138 | /* Increment the number of packets */ |
3140 | total_pkts++; | 3139 | total_pkts++; |
3141 | total_bytes += skb->len; | 3140 | total_bytes += skb->len; |
3142 | 3141 | ||
3143 | skb_record_rx_queue(skb, rx_queue->qindex); | 3142 | skb_record_rx_queue(skb, rx_queue->qindex); |
3144 | 3143 | ||
3145 | gfar_process_frame(ndev, skb); | 3144 | skb->protocol = eth_type_trans(skb, ndev); |
3146 | 3145 | ||
3147 | /* Send the packet up the stack */ | 3146 | /* Send the packet up the stack */ |
3148 | napi_gro_receive(&rx_queue->grp->napi_rx, skb); | 3147 | napi_gro_receive(&rx_queue->grp->napi_rx, skb); |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 0da5aa2c8aba..9fc063af233c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
@@ -1888,6 +1888,14 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, | |||
1888 | ixgbe_rx_pg_size(rx_ring), | 1888 | ixgbe_rx_pg_size(rx_ring), |
1889 | DMA_FROM_DEVICE, | 1889 | DMA_FROM_DEVICE, |
1890 | IXGBE_RX_DMA_ATTR); | 1890 | IXGBE_RX_DMA_ATTR); |
1891 | } else if (ring_uses_build_skb(rx_ring)) { | ||
1892 | unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK; | ||
1893 | |||
1894 | dma_sync_single_range_for_cpu(rx_ring->dev, | ||
1895 | IXGBE_CB(skb)->dma, | ||
1896 | offset, | ||
1897 | skb_headlen(skb), | ||
1898 | DMA_FROM_DEVICE); | ||
1891 | } else { | 1899 | } else { |
1892 | struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; | 1900 | struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; |
1893 | 1901 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 21d29f7936f6..d39b0b7011b2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c | |||
@@ -124,7 +124,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force) | |||
124 | trigger_cmd_completions(dev); | 124 | trigger_cmd_completions(dev); |
125 | } | 125 | } |
126 | 126 | ||
127 | mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0); | 127 | mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 1); |
128 | mlx5_core_err(dev, "end\n"); | 128 | mlx5_core_err(dev, "end\n"); |
129 | 129 | ||
130 | unlock: | 130 | unlock: |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h index f6963b0b4a55..122506daa586 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h | |||
@@ -107,20 +107,20 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = { | |||
107 | MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12), | 107 | MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12), |
108 | MLXSW_AFK_ELEMENT_INFO_U32(PCP, 0x10, 20, 3), | 108 | MLXSW_AFK_ELEMENT_INFO_U32(PCP, 0x10, 20, 3), |
109 | MLXSW_AFK_ELEMENT_INFO_U32(TCP_FLAGS, 0x10, 23, 9), | 109 | MLXSW_AFK_ELEMENT_INFO_U32(TCP_FLAGS, 0x10, 23, 9), |
110 | MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x14, 0, 8), | ||
111 | MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x14, 9, 2), | ||
112 | MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x14, 11, 6), | ||
113 | MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x18, 0, 32), | ||
114 | MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x1C, 0, 32), | ||
115 | MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x18, 8), | ||
116 | MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x20, 8), | ||
117 | MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x28, 8), | ||
118 | MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x30, 8), | ||
119 | MLXSW_AFK_ELEMENT_INFO_U32(DST_L4_PORT, 0x14, 0, 16), | 110 | MLXSW_AFK_ELEMENT_INFO_U32(DST_L4_PORT, 0x14, 0, 16), |
120 | MLXSW_AFK_ELEMENT_INFO_U32(SRC_L4_PORT, 0x14, 16, 16), | 111 | MLXSW_AFK_ELEMENT_INFO_U32(SRC_L4_PORT, 0x14, 16, 16), |
112 | MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8), | ||
113 | MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2), | ||
114 | MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6), | ||
115 | MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x20, 0, 32), | ||
116 | MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x24, 0, 32), | ||
117 | MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x20, 8), | ||
118 | MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x28, 8), | ||
119 | MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x30, 8), | ||
120 | MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x38, 8), | ||
121 | }; | 121 | }; |
122 | 122 | ||
123 | #define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x38 | 123 | #define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x40 |
124 | 124 | ||
125 | struct mlxsw_afk_element_inst { /* element instance in actual block */ | 125 | struct mlxsw_afk_element_inst { /* element instance in actual block */ |
126 | const struct mlxsw_afk_element_info *info; | 126 | const struct mlxsw_afk_element_info *info; |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 3dcc58d61506..c7e941aecc2a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c | |||
@@ -1459,6 +1459,7 @@ mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) | |||
1459 | } | 1459 | } |
1460 | 1460 | ||
1461 | mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port; | 1461 | mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port; |
1462 | mlxsw_sp_port_vlan->ref_count = 1; | ||
1462 | mlxsw_sp_port_vlan->vid = vid; | 1463 | mlxsw_sp_port_vlan->vid = vid; |
1463 | list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list); | 1464 | list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list); |
1464 | 1465 | ||
@@ -1486,8 +1487,10 @@ mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) | |||
1486 | struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; | 1487 | struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; |
1487 | 1488 | ||
1488 | mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); | 1489 | mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); |
1489 | if (mlxsw_sp_port_vlan) | 1490 | if (mlxsw_sp_port_vlan) { |
1491 | mlxsw_sp_port_vlan->ref_count++; | ||
1490 | return mlxsw_sp_port_vlan; | 1492 | return mlxsw_sp_port_vlan; |
1493 | } | ||
1491 | 1494 | ||
1492 | return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid); | 1495 | return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid); |
1493 | } | 1496 | } |
@@ -1496,6 +1499,9 @@ void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) | |||
1496 | { | 1499 | { |
1497 | struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid; | 1500 | struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid; |
1498 | 1501 | ||
1502 | if (--mlxsw_sp_port_vlan->ref_count != 0) | ||
1503 | return; | ||
1504 | |||
1499 | if (mlxsw_sp_port_vlan->bridge_port) | 1505 | if (mlxsw_sp_port_vlan->bridge_port) |
1500 | mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); | 1506 | mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); |
1501 | else if (fid) | 1507 | else if (fid) |
@@ -4207,13 +4213,12 @@ static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_double_ops = { | |||
4207 | .size_validate = mlxsw_sp_resource_kvd_hash_double_size_validate, | 4213 | .size_validate = mlxsw_sp_resource_kvd_hash_double_size_validate, |
4208 | }; | 4214 | }; |
4209 | 4215 | ||
4210 | static struct devlink_resource_size_params mlxsw_sp_kvd_size_params; | ||
4211 | static struct devlink_resource_size_params mlxsw_sp_linear_size_params; | ||
4212 | static struct devlink_resource_size_params mlxsw_sp_hash_single_size_params; | ||
4213 | static struct devlink_resource_size_params mlxsw_sp_hash_double_size_params; | ||
4214 | |||
4215 | static void | 4216 | static void |
4216 | mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core) | 4217 | mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core, |
4218 | struct devlink_resource_size_params *kvd_size_params, | ||
4219 | struct devlink_resource_size_params *linear_size_params, | ||
4220 | struct devlink_resource_size_params *hash_double_size_params, | ||
4221 | struct devlink_resource_size_params *hash_single_size_params) | ||
4217 | { | 4222 | { |
4218 | u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core, | 4223 | u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core, |
4219 | KVD_SINGLE_MIN_SIZE); | 4224 | KVD_SINGLE_MIN_SIZE); |
@@ -4222,37 +4227,35 @@ mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core) | |||
4222 | u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); | 4227 | u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); |
4223 | u32 linear_size_min = 0; | 4228 | u32 linear_size_min = 0; |
4224 | 4229 | ||
4225 | /* KVD top resource */ | 4230 | devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size, |
4226 | mlxsw_sp_kvd_size_params.size_min = kvd_size; | 4231 | MLXSW_SP_KVD_GRANULARITY, |
4227 | mlxsw_sp_kvd_size_params.size_max = kvd_size; | 4232 | DEVLINK_RESOURCE_UNIT_ENTRY); |
4228 | mlxsw_sp_kvd_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY; | 4233 | devlink_resource_size_params_init(linear_size_params, linear_size_min, |
4229 | mlxsw_sp_kvd_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY; | 4234 | kvd_size - single_size_min - |
4230 | 4235 | double_size_min, | |
4231 | /* Linear part init */ | 4236 | MLXSW_SP_KVD_GRANULARITY, |
4232 | mlxsw_sp_linear_size_params.size_min = linear_size_min; | 4237 | DEVLINK_RESOURCE_UNIT_ENTRY); |
4233 | mlxsw_sp_linear_size_params.size_max = kvd_size - single_size_min - | 4238 | devlink_resource_size_params_init(hash_double_size_params, |
4234 | double_size_min; | 4239 | double_size_min, |
4235 | mlxsw_sp_linear_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY; | 4240 | kvd_size - single_size_min - |
4236 | mlxsw_sp_linear_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY; | 4241 | linear_size_min, |
4237 | 4242 | MLXSW_SP_KVD_GRANULARITY, | |
4238 | /* Hash double part init */ | 4243 | DEVLINK_RESOURCE_UNIT_ENTRY); |
4239 | mlxsw_sp_hash_double_size_params.size_min = double_size_min; | 4244 | devlink_resource_size_params_init(hash_single_size_params, |
4240 | mlxsw_sp_hash_double_size_params.size_max = kvd_size - single_size_min - | 4245 | single_size_min, |
4241 | linear_size_min; | 4246 | kvd_size - double_size_min - |
4242 | mlxsw_sp_hash_double_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY; | 4247 | linear_size_min, |
4243 | mlxsw_sp_hash_double_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY; | 4248 | MLXSW_SP_KVD_GRANULARITY, |
4244 | 4249 | DEVLINK_RESOURCE_UNIT_ENTRY); | |
4245 | /* Hash single part init */ | ||
4246 | mlxsw_sp_hash_single_size_params.size_min = single_size_min; | ||
4247 | mlxsw_sp_hash_single_size_params.size_max = kvd_size - double_size_min - | ||
4248 | linear_size_min; | ||
4249 | mlxsw_sp_hash_single_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY; | ||
4250 | mlxsw_sp_hash_single_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY; | ||
4251 | } | 4250 | } |
4252 | 4251 | ||
4253 | static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core) | 4252 | static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core) |
4254 | { | 4253 | { |
4255 | struct devlink *devlink = priv_to_devlink(mlxsw_core); | 4254 | struct devlink *devlink = priv_to_devlink(mlxsw_core); |
4255 | struct devlink_resource_size_params hash_single_size_params; | ||
4256 | struct devlink_resource_size_params hash_double_size_params; | ||
4257 | struct devlink_resource_size_params linear_size_params; | ||
4258 | struct devlink_resource_size_params kvd_size_params; | ||
4256 | u32 kvd_size, single_size, double_size, linear_size; | 4259 | u32 kvd_size, single_size, double_size, linear_size; |
4257 | const struct mlxsw_config_profile *profile; | 4260 | const struct mlxsw_config_profile *profile; |
4258 | int err; | 4261 | int err; |
@@ -4261,13 +4264,17 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core) | |||
4261 | if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) | 4264 | if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) |
4262 | return -EIO; | 4265 | return -EIO; |
4263 | 4266 | ||
4264 | mlxsw_sp_resource_size_params_prepare(mlxsw_core); | 4267 | mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params, |
4268 | &linear_size_params, | ||
4269 | &hash_double_size_params, | ||
4270 | &hash_single_size_params); | ||
4271 | |||
4265 | kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); | 4272 | kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); |
4266 | err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, | 4273 | err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, |
4267 | true, kvd_size, | 4274 | true, kvd_size, |
4268 | MLXSW_SP_RESOURCE_KVD, | 4275 | MLXSW_SP_RESOURCE_KVD, |
4269 | DEVLINK_RESOURCE_ID_PARENT_TOP, | 4276 | DEVLINK_RESOURCE_ID_PARENT_TOP, |
4270 | &mlxsw_sp_kvd_size_params, | 4277 | &kvd_size_params, |
4271 | &mlxsw_sp_resource_kvd_ops); | 4278 | &mlxsw_sp_resource_kvd_ops); |
4272 | if (err) | 4279 | if (err) |
4273 | return err; | 4280 | return err; |
@@ -4277,7 +4284,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core) | |||
4277 | false, linear_size, | 4284 | false, linear_size, |
4278 | MLXSW_SP_RESOURCE_KVD_LINEAR, | 4285 | MLXSW_SP_RESOURCE_KVD_LINEAR, |
4279 | MLXSW_SP_RESOURCE_KVD, | 4286 | MLXSW_SP_RESOURCE_KVD, |
4280 | &mlxsw_sp_linear_size_params, | 4287 | &linear_size_params, |
4281 | &mlxsw_sp_resource_kvd_linear_ops); | 4288 | &mlxsw_sp_resource_kvd_linear_ops); |
4282 | if (err) | 4289 | if (err) |
4283 | return err; | 4290 | return err; |
@@ -4291,7 +4298,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core) | |||
4291 | false, double_size, | 4298 | false, double_size, |
4292 | MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, | 4299 | MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, |
4293 | MLXSW_SP_RESOURCE_KVD, | 4300 | MLXSW_SP_RESOURCE_KVD, |
4294 | &mlxsw_sp_hash_double_size_params, | 4301 | &hash_double_size_params, |
4295 | &mlxsw_sp_resource_kvd_hash_double_ops); | 4302 | &mlxsw_sp_resource_kvd_hash_double_ops); |
4296 | if (err) | 4303 | if (err) |
4297 | return err; | 4304 | return err; |
@@ -4301,7 +4308,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core) | |||
4301 | false, single_size, | 4308 | false, single_size, |
4302 | MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, | 4309 | MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, |
4303 | MLXSW_SP_RESOURCE_KVD, | 4310 | MLXSW_SP_RESOURCE_KVD, |
4304 | &mlxsw_sp_hash_single_size_params, | 4311 | &hash_single_size_params, |
4305 | &mlxsw_sp_resource_kvd_hash_single_ops); | 4312 | &mlxsw_sp_resource_kvd_hash_single_ops); |
4306 | if (err) | 4313 | if (err) |
4307 | return err; | 4314 | return err; |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index bdd8f94a452c..4ec1ca3c96c8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h | |||
@@ -211,6 +211,7 @@ struct mlxsw_sp_port_vlan { | |||
211 | struct list_head list; | 211 | struct list_head list; |
212 | struct mlxsw_sp_port *mlxsw_sp_port; | 212 | struct mlxsw_sp_port *mlxsw_sp_port; |
213 | struct mlxsw_sp_fid *fid; | 213 | struct mlxsw_sp_fid *fid; |
214 | unsigned int ref_count; | ||
214 | u16 vid; | 215 | u16 vid; |
215 | struct mlxsw_sp_bridge_port *bridge_port; | 216 | struct mlxsw_sp_bridge_port *bridge_port; |
216 | struct list_head bridge_vlan_node; | 217 | struct list_head bridge_vlan_node; |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c index bbd238e50f05..54262af4e98f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c | |||
@@ -112,11 +112,11 @@ static const int mlxsw_sp_sfgc_bc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = { | |||
112 | [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP] = 1, | 112 | [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP] = 1, |
113 | [MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL] = 1, | 113 | [MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL] = 1, |
114 | [MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST] = 1, | 114 | [MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST] = 1, |
115 | [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1, | ||
115 | }; | 116 | }; |
116 | 117 | ||
117 | static const int mlxsw_sp_sfgc_mc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = { | 118 | static const int mlxsw_sp_sfgc_mc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = { |
118 | [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4] = 1, | 119 | [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4] = 1, |
119 | [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1, | ||
120 | }; | 120 | }; |
121 | 121 | ||
122 | static const int *mlxsw_sp_packet_type_sfgc_types[] = { | 122 | static const int *mlxsw_sp_packet_type_sfgc_types[] = { |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 593ad31be749..161bcdc012f0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | |||
@@ -1203,6 +1203,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, | |||
1203 | bool dynamic) | 1203 | bool dynamic) |
1204 | { | 1204 | { |
1205 | char *sfd_pl; | 1205 | char *sfd_pl; |
1206 | u8 num_rec; | ||
1206 | int err; | 1207 | int err; |
1207 | 1208 | ||
1208 | sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); | 1209 | sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); |
@@ -1212,9 +1213,16 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, | |||
1212 | mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); | 1213 | mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); |
1213 | mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), | 1214 | mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), |
1214 | mac, fid, action, local_port); | 1215 | mac, fid, action, local_port); |
1216 | num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); | ||
1215 | err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); | 1217 | err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); |
1216 | kfree(sfd_pl); | 1218 | if (err) |
1219 | goto out; | ||
1220 | |||
1221 | if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl)) | ||
1222 | err = -EBUSY; | ||
1217 | 1223 | ||
1224 | out: | ||
1225 | kfree(sfd_pl); | ||
1218 | return err; | 1226 | return err; |
1219 | } | 1227 | } |
1220 | 1228 | ||
@@ -1239,6 +1247,7 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id, | |||
1239 | bool adding, bool dynamic) | 1247 | bool adding, bool dynamic) |
1240 | { | 1248 | { |
1241 | char *sfd_pl; | 1249 | char *sfd_pl; |
1250 | u8 num_rec; | ||
1242 | int err; | 1251 | int err; |
1243 | 1252 | ||
1244 | sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); | 1253 | sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); |
@@ -1249,9 +1258,16 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id, | |||
1249 | mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), | 1258 | mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), |
1250 | mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP, | 1259 | mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP, |
1251 | lag_vid, lag_id); | 1260 | lag_vid, lag_id); |
1261 | num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); | ||
1252 | err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); | 1262 | err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); |
1253 | kfree(sfd_pl); | 1263 | if (err) |
1264 | goto out; | ||
1265 | |||
1266 | if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl)) | ||
1267 | err = -EBUSY; | ||
1254 | 1268 | ||
1269 | out: | ||
1270 | kfree(sfd_pl); | ||
1255 | return err; | 1271 | return err; |
1256 | } | 1272 | } |
1257 | 1273 | ||
@@ -1296,6 +1312,7 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr, | |||
1296 | u16 fid, u16 mid_idx, bool adding) | 1312 | u16 fid, u16 mid_idx, bool adding) |
1297 | { | 1313 | { |
1298 | char *sfd_pl; | 1314 | char *sfd_pl; |
1315 | u8 num_rec; | ||
1299 | int err; | 1316 | int err; |
1300 | 1317 | ||
1301 | sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); | 1318 | sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); |
@@ -1305,7 +1322,15 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr, | |||
1305 | mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); | 1322 | mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); |
1306 | mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid, | 1323 | mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid, |
1307 | MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx); | 1324 | MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx); |
1325 | num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); | ||
1308 | err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); | 1326 | err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); |
1327 | if (err) | ||
1328 | goto out; | ||
1329 | |||
1330 | if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl)) | ||
1331 | err = -EBUSY; | ||
1332 | |||
1333 | out: | ||
1309 | kfree(sfd_pl); | 1334 | kfree(sfd_pl); |
1310 | return err; | 1335 | return err; |
1311 | } | 1336 | } |
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 92dcf8717fc6..14c839bb09e7 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c | |||
@@ -439,6 +439,17 @@ static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear, | |||
439 | enum_index); | 439 | enum_index); |
440 | } | 440 | } |
441 | 441 | ||
442 | static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data, | ||
443 | int enum_index) | ||
444 | { | ||
445 | iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]); | ||
446 | } | ||
447 | |||
448 | static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index) | ||
449 | { | ||
450 | return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]); | ||
451 | } | ||
452 | |||
442 | static bool sh_eth_is_gether(struct sh_eth_private *mdp) | 453 | static bool sh_eth_is_gether(struct sh_eth_private *mdp) |
443 | { | 454 | { |
444 | return mdp->reg_offset == sh_eth_offset_gigabit; | 455 | return mdp->reg_offset == sh_eth_offset_gigabit; |
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index a6753ccba711..e5fe70134690 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h | |||
@@ -567,15 +567,4 @@ static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp, | |||
567 | return mdp->tsu_addr + mdp->reg_offset[enum_index]; | 567 | return mdp->tsu_addr + mdp->reg_offset[enum_index]; |
568 | } | 568 | } |
569 | 569 | ||
570 | static inline void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data, | ||
571 | int enum_index) | ||
572 | { | ||
573 | iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]); | ||
574 | } | ||
575 | |||
576 | static inline u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index) | ||
577 | { | ||
578 | return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]); | ||
579 | } | ||
580 | |||
581 | #endif /* #ifndef __SH_ETH_H__ */ | 570 | #endif /* #ifndef __SH_ETH_H__ */ |
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 17e529af79dc..0265d703eb03 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c | |||
@@ -852,13 +852,6 @@ int netvsc_send(struct net_device *ndev, | |||
852 | if (unlikely(!net_device || net_device->destroy)) | 852 | if (unlikely(!net_device || net_device->destroy)) |
853 | return -ENODEV; | 853 | return -ENODEV; |
854 | 854 | ||
855 | /* We may race with netvsc_connect_vsp()/netvsc_init_buf() and get | ||
856 | * here before the negotiation with the host is finished and | ||
857 | * send_section_map may not be allocated yet. | ||
858 | */ | ||
859 | if (unlikely(!net_device->send_section_map)) | ||
860 | return -EAGAIN; | ||
861 | |||
862 | nvchan = &net_device->chan_table[packet->q_idx]; | 855 | nvchan = &net_device->chan_table[packet->q_idx]; |
863 | packet->send_buf_index = NETVSC_INVALID_INDEX; | 856 | packet->send_buf_index = NETVSC_INVALID_INDEX; |
864 | packet->cp_partial = false; | 857 | packet->cp_partial = false; |
@@ -866,10 +859,8 @@ int netvsc_send(struct net_device *ndev, | |||
866 | /* Send control message directly without accessing msd (Multi-Send | 859 | /* Send control message directly without accessing msd (Multi-Send |
867 | * Data) field which may be changed during data packet processing. | 860 | * Data) field which may be changed during data packet processing. |
868 | */ | 861 | */ |
869 | if (!skb) { | 862 | if (!skb) |
870 | cur_send = packet; | 863 | return netvsc_send_pkt(device, packet, net_device, pb, skb); |
871 | goto send_now; | ||
872 | } | ||
873 | 864 | ||
874 | /* batch packets in send buffer if possible */ | 865 | /* batch packets in send buffer if possible */ |
875 | msdp = &nvchan->msd; | 866 | msdp = &nvchan->msd; |
@@ -953,7 +944,6 @@ int netvsc_send(struct net_device *ndev, | |||
953 | } | 944 | } |
954 | } | 945 | } |
955 | 946 | ||
956 | send_now: | ||
957 | if (cur_send) | 947 | if (cur_send) |
958 | ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb); | 948 | ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb); |
959 | 949 | ||
@@ -1217,9 +1207,10 @@ int netvsc_poll(struct napi_struct *napi, int budget) | |||
1217 | if (send_recv_completions(ndev, net_device, nvchan) == 0 && | 1207 | if (send_recv_completions(ndev, net_device, nvchan) == 0 && |
1218 | work_done < budget && | 1208 | work_done < budget && |
1219 | napi_complete_done(napi, work_done) && | 1209 | napi_complete_done(napi, work_done) && |
1220 | hv_end_read(&channel->inbound)) { | 1210 | hv_end_read(&channel->inbound) && |
1211 | napi_schedule_prep(napi)) { | ||
1221 | hv_begin_read(&channel->inbound); | 1212 | hv_begin_read(&channel->inbound); |
1222 | napi_reschedule(napi); | 1213 | __napi_schedule(napi); |
1223 | } | 1214 | } |
1224 | 1215 | ||
1225 | /* Driver may overshoot since multiple packets per descriptor */ | 1216 | /* Driver may overshoot since multiple packets per descriptor */ |
@@ -1242,7 +1233,7 @@ void netvsc_channel_cb(void *context) | |||
1242 | /* disable interupts from host */ | 1233 | /* disable interupts from host */ |
1243 | hv_begin_read(rbi); | 1234 | hv_begin_read(rbi); |
1244 | 1235 | ||
1245 | __napi_schedule(&nvchan->napi); | 1236 | __napi_schedule_irqoff(&nvchan->napi); |
1246 | } | 1237 | } |
1247 | } | 1238 | } |
1248 | 1239 | ||
@@ -1296,7 +1287,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, | |||
1296 | netvsc_channel_cb, net_device->chan_table); | 1287 | netvsc_channel_cb, net_device->chan_table); |
1297 | 1288 | ||
1298 | if (ret != 0) { | 1289 | if (ret != 0) { |
1299 | netif_napi_del(&net_device->chan_table[0].napi); | ||
1300 | netdev_err(ndev, "unable to open channel: %d\n", ret); | 1290 | netdev_err(ndev, "unable to open channel: %d\n", ret); |
1301 | goto cleanup; | 1291 | goto cleanup; |
1302 | } | 1292 | } |
@@ -1306,11 +1296,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, | |||
1306 | 1296 | ||
1307 | napi_enable(&net_device->chan_table[0].napi); | 1297 | napi_enable(&net_device->chan_table[0].napi); |
1308 | 1298 | ||
1309 | /* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is | ||
1310 | * populated. | ||
1311 | */ | ||
1312 | rcu_assign_pointer(net_device_ctx->nvdev, net_device); | ||
1313 | |||
1314 | /* Connect with the NetVsp */ | 1299 | /* Connect with the NetVsp */ |
1315 | ret = netvsc_connect_vsp(device, net_device, device_info); | 1300 | ret = netvsc_connect_vsp(device, net_device, device_info); |
1316 | if (ret != 0) { | 1301 | if (ret != 0) { |
@@ -1319,6 +1304,11 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, | |||
1319 | goto close; | 1304 | goto close; |
1320 | } | 1305 | } |
1321 | 1306 | ||
1307 | /* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is | ||
1308 | * populated. | ||
1309 | */ | ||
1310 | rcu_assign_pointer(net_device_ctx->nvdev, net_device); | ||
1311 | |||
1322 | return net_device; | 1312 | return net_device; |
1323 | 1313 | ||
1324 | close: | 1314 | close: |
@@ -1329,6 +1319,7 @@ close: | |||
1329 | vmbus_close(device->channel); | 1319 | vmbus_close(device->channel); |
1330 | 1320 | ||
1331 | cleanup: | 1321 | cleanup: |
1322 | netif_napi_del(&net_device->chan_table[0].napi); | ||
1332 | free_netvsc_device(&net_device->rcu); | 1323 | free_netvsc_device(&net_device->rcu); |
1333 | 1324 | ||
1334 | return ERR_PTR(ret); | 1325 | return ERR_PTR(ret); |
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index c5584c2d440e..cdb78eefab67 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
@@ -66,10 +66,36 @@ static int debug = -1; | |||
66 | module_param(debug, int, S_IRUGO); | 66 | module_param(debug, int, S_IRUGO); |
67 | MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); | 67 | MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); |
68 | 68 | ||
69 | static void netvsc_set_multicast_list(struct net_device *net) | 69 | static void netvsc_change_rx_flags(struct net_device *net, int change) |
70 | { | 70 | { |
71 | struct net_device_context *net_device_ctx = netdev_priv(net); | 71 | struct net_device_context *ndev_ctx = netdev_priv(net); |
72 | struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); | 72 | struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); |
73 | int inc; | ||
74 | |||
75 | if (!vf_netdev) | ||
76 | return; | ||
77 | |||
78 | if (change & IFF_PROMISC) { | ||
79 | inc = (net->flags & IFF_PROMISC) ? 1 : -1; | ||
80 | dev_set_promiscuity(vf_netdev, inc); | ||
81 | } | ||
82 | |||
83 | if (change & IFF_ALLMULTI) { | ||
84 | inc = (net->flags & IFF_ALLMULTI) ? 1 : -1; | ||
85 | dev_set_allmulti(vf_netdev, inc); | ||
86 | } | ||
87 | } | ||
88 | |||
89 | static void netvsc_set_rx_mode(struct net_device *net) | ||
90 | { | ||
91 | struct net_device_context *ndev_ctx = netdev_priv(net); | ||
92 | struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); | ||
93 | struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev); | ||
94 | |||
95 | if (vf_netdev) { | ||
96 | dev_uc_sync(vf_netdev, net); | ||
97 | dev_mc_sync(vf_netdev, net); | ||
98 | } | ||
73 | 99 | ||
74 | rndis_filter_update(nvdev); | 100 | rndis_filter_update(nvdev); |
75 | } | 101 | } |
@@ -91,12 +117,11 @@ static int netvsc_open(struct net_device *net) | |||
91 | return ret; | 117 | return ret; |
92 | } | 118 | } |
93 | 119 | ||
94 | netif_tx_wake_all_queues(net); | ||
95 | |||
96 | rdev = nvdev->extension; | 120 | rdev = nvdev->extension; |
97 | 121 | if (!rdev->link_state) { | |
98 | if (!rdev->link_state) | ||
99 | netif_carrier_on(net); | 122 | netif_carrier_on(net); |
123 | netif_tx_wake_all_queues(net); | ||
124 | } | ||
100 | 125 | ||
101 | if (vf_netdev) { | 126 | if (vf_netdev) { |
102 | /* Setting synthetic device up transparently sets | 127 | /* Setting synthetic device up transparently sets |
@@ -299,8 +324,19 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, | |||
299 | rcu_read_lock(); | 324 | rcu_read_lock(); |
300 | vf_netdev = rcu_dereference(ndc->vf_netdev); | 325 | vf_netdev = rcu_dereference(ndc->vf_netdev); |
301 | if (vf_netdev) { | 326 | if (vf_netdev) { |
302 | txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0; | 327 | const struct net_device_ops *vf_ops = vf_netdev->netdev_ops; |
303 | qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping; | 328 | |
329 | if (vf_ops->ndo_select_queue) | ||
330 | txq = vf_ops->ndo_select_queue(vf_netdev, skb, | ||
331 | accel_priv, fallback); | ||
332 | else | ||
333 | txq = fallback(vf_netdev, skb); | ||
334 | |||
335 | /* Record the queue selected by VF so that it can be | ||
336 | * used for common case where VF has more queues than | ||
337 | * the synthetic device. | ||
338 | */ | ||
339 | qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq; | ||
304 | } else { | 340 | } else { |
305 | txq = netvsc_pick_tx(ndev, skb); | 341 | txq = netvsc_pick_tx(ndev, skb); |
306 | } | 342 | } |
@@ -1576,7 +1612,8 @@ static const struct net_device_ops device_ops = { | |||
1576 | .ndo_open = netvsc_open, | 1612 | .ndo_open = netvsc_open, |
1577 | .ndo_stop = netvsc_close, | 1613 | .ndo_stop = netvsc_close, |
1578 | .ndo_start_xmit = netvsc_start_xmit, | 1614 | .ndo_start_xmit = netvsc_start_xmit, |
1579 | .ndo_set_rx_mode = netvsc_set_multicast_list, | 1615 | .ndo_change_rx_flags = netvsc_change_rx_flags, |
1616 | .ndo_set_rx_mode = netvsc_set_rx_mode, | ||
1580 | .ndo_change_mtu = netvsc_change_mtu, | 1617 | .ndo_change_mtu = netvsc_change_mtu, |
1581 | .ndo_validate_addr = eth_validate_addr, | 1618 | .ndo_validate_addr = eth_validate_addr, |
1582 | .ndo_set_mac_address = netvsc_set_mac_addr, | 1619 | .ndo_set_mac_address = netvsc_set_mac_addr, |
@@ -1807,6 +1844,11 @@ static void __netvsc_vf_setup(struct net_device *ndev, | |||
1807 | netdev_warn(vf_netdev, | 1844 | netdev_warn(vf_netdev, |
1808 | "unable to change mtu to %u\n", ndev->mtu); | 1845 | "unable to change mtu to %u\n", ndev->mtu); |
1809 | 1846 | ||
1847 | /* set multicast etc flags on VF */ | ||
1848 | dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE); | ||
1849 | dev_uc_sync(vf_netdev, ndev); | ||
1850 | dev_mc_sync(vf_netdev, ndev); | ||
1851 | |||
1810 | if (netif_running(ndev)) { | 1852 | if (netif_running(ndev)) { |
1811 | ret = dev_open(vf_netdev); | 1853 | ret = dev_open(vf_netdev); |
1812 | if (ret) | 1854 | if (ret) |
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index c3ca191fea7f..8927c483c217 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c | |||
@@ -854,15 +854,19 @@ static void rndis_set_multicast(struct work_struct *w) | |||
854 | { | 854 | { |
855 | struct rndis_device *rdev | 855 | struct rndis_device *rdev |
856 | = container_of(w, struct rndis_device, mcast_work); | 856 | = container_of(w, struct rndis_device, mcast_work); |
857 | u32 filter = NDIS_PACKET_TYPE_DIRECTED; | ||
858 | unsigned int flags = rdev->ndev->flags; | ||
857 | 859 | ||
858 | if (rdev->ndev->flags & IFF_PROMISC) | 860 | if (flags & IFF_PROMISC) { |
859 | rndis_filter_set_packet_filter(rdev, | 861 | filter = NDIS_PACKET_TYPE_PROMISCUOUS; |
860 | NDIS_PACKET_TYPE_PROMISCUOUS); | 862 | } else { |
861 | else | 863 | if (flags & IFF_ALLMULTI) |
862 | rndis_filter_set_packet_filter(rdev, | 864 | flags |= NDIS_PACKET_TYPE_ALL_MULTICAST; |
863 | NDIS_PACKET_TYPE_BROADCAST | | 865 | if (flags & IFF_BROADCAST) |
864 | NDIS_PACKET_TYPE_ALL_MULTICAST | | 866 | flags |= NDIS_PACKET_TYPE_BROADCAST; |
865 | NDIS_PACKET_TYPE_DIRECTED); | 867 | } |
868 | |||
869 | rndis_filter_set_packet_filter(rdev, filter); | ||
866 | } | 870 | } |
867 | 871 | ||
868 | void rndis_filter_update(struct netvsc_device *nvdev) | 872 | void rndis_filter_update(struct netvsc_device *nvdev) |
@@ -1340,6 +1344,9 @@ void rndis_filter_device_remove(struct hv_device *dev, | |||
1340 | { | 1344 | { |
1341 | struct rndis_device *rndis_dev = net_dev->extension; | 1345 | struct rndis_device *rndis_dev = net_dev->extension; |
1342 | 1346 | ||
1347 | /* Don't try and setup sub channels if about to halt */ | ||
1348 | cancel_work_sync(&net_dev->subchan_work); | ||
1349 | |||
1343 | /* Halt and release the rndis device */ | 1350 | /* Halt and release the rndis device */ |
1344 | rndis_filter_halt_device(rndis_dev); | 1351 | rndis_filter_halt_device(rndis_dev); |
1345 | 1352 | ||
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index e3e29c2b028b..a6f924fee584 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
@@ -819,7 +819,7 @@ void phy_start(struct phy_device *phydev) | |||
819 | break; | 819 | break; |
820 | case PHY_HALTED: | 820 | case PHY_HALTED: |
821 | /* if phy was suspended, bring the physical link up again */ | 821 | /* if phy was suspended, bring the physical link up again */ |
822 | phy_resume(phydev); | 822 | __phy_resume(phydev); |
823 | 823 | ||
824 | /* make sure interrupts are re-enabled for the PHY */ | 824 | /* make sure interrupts are re-enabled for the PHY */ |
825 | if (phy_interrupt_is_valid(phydev)) { | 825 | if (phy_interrupt_is_valid(phydev)) { |
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index d39ae77707ef..478405e544cc 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c | |||
@@ -135,9 +135,7 @@ static int mdio_bus_phy_resume(struct device *dev) | |||
135 | if (!mdio_bus_phy_may_suspend(phydev)) | 135 | if (!mdio_bus_phy_may_suspend(phydev)) |
136 | goto no_resume; | 136 | goto no_resume; |
137 | 137 | ||
138 | mutex_lock(&phydev->lock); | ||
139 | ret = phy_resume(phydev); | 138 | ret = phy_resume(phydev); |
140 | mutex_unlock(&phydev->lock); | ||
141 | if (ret < 0) | 139 | if (ret < 0) |
142 | return ret; | 140 | return ret; |
143 | 141 | ||
@@ -1041,9 +1039,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, | |||
1041 | if (err) | 1039 | if (err) |
1042 | goto error; | 1040 | goto error; |
1043 | 1041 | ||
1044 | mutex_lock(&phydev->lock); | ||
1045 | phy_resume(phydev); | 1042 | phy_resume(phydev); |
1046 | mutex_unlock(&phydev->lock); | ||
1047 | phy_led_triggers_register(phydev); | 1043 | phy_led_triggers_register(phydev); |
1048 | 1044 | ||
1049 | return err; | 1045 | return err; |
@@ -1172,7 +1168,7 @@ int phy_suspend(struct phy_device *phydev) | |||
1172 | } | 1168 | } |
1173 | EXPORT_SYMBOL(phy_suspend); | 1169 | EXPORT_SYMBOL(phy_suspend); |
1174 | 1170 | ||
1175 | int phy_resume(struct phy_device *phydev) | 1171 | int __phy_resume(struct phy_device *phydev) |
1176 | { | 1172 | { |
1177 | struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver); | 1173 | struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver); |
1178 | int ret = 0; | 1174 | int ret = 0; |
@@ -1189,6 +1185,18 @@ int phy_resume(struct phy_device *phydev) | |||
1189 | 1185 | ||
1190 | return ret; | 1186 | return ret; |
1191 | } | 1187 | } |
1188 | EXPORT_SYMBOL(__phy_resume); | ||
1189 | |||
1190 | int phy_resume(struct phy_device *phydev) | ||
1191 | { | ||
1192 | int ret; | ||
1193 | |||
1194 | mutex_lock(&phydev->lock); | ||
1195 | ret = __phy_resume(phydev); | ||
1196 | mutex_unlock(&phydev->lock); | ||
1197 | |||
1198 | return ret; | ||
1199 | } | ||
1192 | EXPORT_SYMBOL(phy_resume); | 1200 | EXPORT_SYMBOL(phy_resume); |
1193 | 1201 | ||
1194 | int phy_loopback(struct phy_device *phydev, bool enable) | 1202 | int phy_loopback(struct phy_device *phydev, bool enable) |
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 255a5def56e9..fa2a9bdd1866 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c | |||
@@ -3161,6 +3161,15 @@ ppp_connect_channel(struct channel *pch, int unit) | |||
3161 | goto outl; | 3161 | goto outl; |
3162 | 3162 | ||
3163 | ppp_lock(ppp); | 3163 | ppp_lock(ppp); |
3164 | spin_lock_bh(&pch->downl); | ||
3165 | if (!pch->chan) { | ||
3166 | /* Don't connect unregistered channels */ | ||
3167 | spin_unlock_bh(&pch->downl); | ||
3168 | ppp_unlock(ppp); | ||
3169 | ret = -ENOTCONN; | ||
3170 | goto outl; | ||
3171 | } | ||
3172 | spin_unlock_bh(&pch->downl); | ||
3164 | if (pch->file.hdrlen > ppp->file.hdrlen) | 3173 | if (pch->file.hdrlen > ppp->file.hdrlen) |
3165 | ppp->file.hdrlen = pch->file.hdrlen; | 3174 | ppp->file.hdrlen = pch->file.hdrlen; |
3166 | hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */ | 3175 | hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */ |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index b52258c327d2..7433bb2e4451 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -181,7 +181,6 @@ struct tun_file { | |||
181 | struct tun_struct *detached; | 181 | struct tun_struct *detached; |
182 | struct ptr_ring tx_ring; | 182 | struct ptr_ring tx_ring; |
183 | struct xdp_rxq_info xdp_rxq; | 183 | struct xdp_rxq_info xdp_rxq; |
184 | int xdp_pending_pkts; | ||
185 | }; | 184 | }; |
186 | 185 | ||
187 | struct tun_flow_entry { | 186 | struct tun_flow_entry { |
@@ -1643,6 +1642,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, | |||
1643 | else | 1642 | else |
1644 | *skb_xdp = 0; | 1643 | *skb_xdp = 0; |
1645 | 1644 | ||
1645 | preempt_disable(); | ||
1646 | rcu_read_lock(); | 1646 | rcu_read_lock(); |
1647 | xdp_prog = rcu_dereference(tun->xdp_prog); | 1647 | xdp_prog = rcu_dereference(tun->xdp_prog); |
1648 | if (xdp_prog && !*skb_xdp) { | 1648 | if (xdp_prog && !*skb_xdp) { |
@@ -1662,11 +1662,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, | |||
1662 | case XDP_REDIRECT: | 1662 | case XDP_REDIRECT: |
1663 | get_page(alloc_frag->page); | 1663 | get_page(alloc_frag->page); |
1664 | alloc_frag->offset += buflen; | 1664 | alloc_frag->offset += buflen; |
1665 | ++tfile->xdp_pending_pkts; | ||
1666 | err = xdp_do_redirect(tun->dev, &xdp, xdp_prog); | 1665 | err = xdp_do_redirect(tun->dev, &xdp, xdp_prog); |
1666 | xdp_do_flush_map(); | ||
1667 | if (err) | 1667 | if (err) |
1668 | goto err_redirect; | 1668 | goto err_redirect; |
1669 | rcu_read_unlock(); | 1669 | rcu_read_unlock(); |
1670 | preempt_enable(); | ||
1670 | return NULL; | 1671 | return NULL; |
1671 | case XDP_TX: | 1672 | case XDP_TX: |
1672 | xdp_xmit = true; | 1673 | xdp_xmit = true; |
@@ -1688,6 +1689,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, | |||
1688 | skb = build_skb(buf, buflen); | 1689 | skb = build_skb(buf, buflen); |
1689 | if (!skb) { | 1690 | if (!skb) { |
1690 | rcu_read_unlock(); | 1691 | rcu_read_unlock(); |
1692 | preempt_enable(); | ||
1691 | return ERR_PTR(-ENOMEM); | 1693 | return ERR_PTR(-ENOMEM); |
1692 | } | 1694 | } |
1693 | 1695 | ||
@@ -1700,10 +1702,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, | |||
1700 | skb->dev = tun->dev; | 1702 | skb->dev = tun->dev; |
1701 | generic_xdp_tx(skb, xdp_prog); | 1703 | generic_xdp_tx(skb, xdp_prog); |
1702 | rcu_read_unlock(); | 1704 | rcu_read_unlock(); |
1705 | preempt_enable(); | ||
1703 | return NULL; | 1706 | return NULL; |
1704 | } | 1707 | } |
1705 | 1708 | ||
1706 | rcu_read_unlock(); | 1709 | rcu_read_unlock(); |
1710 | preempt_enable(); | ||
1707 | 1711 | ||
1708 | return skb; | 1712 | return skb; |
1709 | 1713 | ||
@@ -1711,6 +1715,7 @@ err_redirect: | |||
1711 | put_page(alloc_frag->page); | 1715 | put_page(alloc_frag->page); |
1712 | err_xdp: | 1716 | err_xdp: |
1713 | rcu_read_unlock(); | 1717 | rcu_read_unlock(); |
1718 | preempt_enable(); | ||
1714 | this_cpu_inc(tun->pcpu_stats->rx_dropped); | 1719 | this_cpu_inc(tun->pcpu_stats->rx_dropped); |
1715 | return NULL; | 1720 | return NULL; |
1716 | } | 1721 | } |
@@ -1984,11 +1989,6 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from) | |||
1984 | result = tun_get_user(tun, tfile, NULL, from, | 1989 | result = tun_get_user(tun, tfile, NULL, from, |
1985 | file->f_flags & O_NONBLOCK, false); | 1990 | file->f_flags & O_NONBLOCK, false); |
1986 | 1991 | ||
1987 | if (tfile->xdp_pending_pkts) { | ||
1988 | tfile->xdp_pending_pkts = 0; | ||
1989 | xdp_do_flush_map(); | ||
1990 | } | ||
1991 | |||
1992 | tun_put(tun); | 1992 | tun_put(tun); |
1993 | return result; | 1993 | return result; |
1994 | } | 1994 | } |
@@ -2325,13 +2325,6 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) | |||
2325 | ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter, | 2325 | ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter, |
2326 | m->msg_flags & MSG_DONTWAIT, | 2326 | m->msg_flags & MSG_DONTWAIT, |
2327 | m->msg_flags & MSG_MORE); | 2327 | m->msg_flags & MSG_MORE); |
2328 | |||
2329 | if (tfile->xdp_pending_pkts >= NAPI_POLL_WEIGHT || | ||
2330 | !(m->msg_flags & MSG_MORE)) { | ||
2331 | tfile->xdp_pending_pkts = 0; | ||
2332 | xdp_do_flush_map(); | ||
2333 | } | ||
2334 | |||
2335 | tun_put(tun); | 2328 | tun_put(tun); |
2336 | return ret; | 2329 | return ret; |
2337 | } | 2330 | } |
@@ -3163,7 +3156,6 @@ static int tun_chr_open(struct inode *inode, struct file * file) | |||
3163 | sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); | 3156 | sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); |
3164 | 3157 | ||
3165 | memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring)); | 3158 | memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring)); |
3166 | tfile->xdp_pending_pkts = 0; | ||
3167 | 3159 | ||
3168 | return 0; | 3160 | return 0; |
3169 | } | 3161 | } |
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 05dca3e5c93d..fff4b13eece2 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c | |||
@@ -896,6 +896,12 @@ static const struct usb_device_id products[] = { | |||
896 | USB_CDC_PROTO_NONE), | 896 | USB_CDC_PROTO_NONE), |
897 | .driver_info = (unsigned long)&wwan_info, | 897 | .driver_info = (unsigned long)&wwan_info, |
898 | }, { | 898 | }, { |
899 | /* Cinterion PLS8 modem by GEMALTO */ | ||
900 | USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0061, USB_CLASS_COMM, | ||
901 | USB_CDC_SUBCLASS_ETHERNET, | ||
902 | USB_CDC_PROTO_NONE), | ||
903 | .driver_info = (unsigned long)&wwan_info, | ||
904 | }, { | ||
899 | USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, | 905 | USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, |
900 | USB_CDC_PROTO_NONE), | 906 | USB_CDC_PROTO_NONE), |
901 | .driver_info = (unsigned long) &cdc_info, | 907 | .driver_info = (unsigned long) &cdc_info, |
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 958b2e8b90f6..86f7196f9d91 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
@@ -1794,7 +1794,7 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg) | |||
1794 | 1794 | ||
1795 | tx_data += len; | 1795 | tx_data += len; |
1796 | agg->skb_len += len; | 1796 | agg->skb_len += len; |
1797 | agg->skb_num++; | 1797 | agg->skb_num += skb_shinfo(skb)->gso_segs ?: 1; |
1798 | 1798 | ||
1799 | dev_kfree_skb_any(skb); | 1799 | dev_kfree_skb_any(skb); |
1800 | 1800 | ||
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 9bb9e562b893..23374603e4d9 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -504,6 +504,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq, | |||
504 | page_off += *len; | 504 | page_off += *len; |
505 | 505 | ||
506 | while (--*num_buf) { | 506 | while (--*num_buf) { |
507 | int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | ||
507 | unsigned int buflen; | 508 | unsigned int buflen; |
508 | void *buf; | 509 | void *buf; |
509 | int off; | 510 | int off; |
@@ -518,7 +519,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq, | |||
518 | /* guard against a misconfigured or uncooperative backend that | 519 | /* guard against a misconfigured or uncooperative backend that |
519 | * is sending packet larger than the MTU. | 520 | * is sending packet larger than the MTU. |
520 | */ | 521 | */ |
521 | if ((page_off + buflen) > PAGE_SIZE) { | 522 | if ((page_off + buflen + tailroom) > PAGE_SIZE) { |
522 | put_page(p); | 523 | put_page(p); |
523 | goto err_buf; | 524 | goto err_buf; |
524 | } | 525 | } |
@@ -690,6 +691,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, | |||
690 | unsigned int truesize; | 691 | unsigned int truesize; |
691 | unsigned int headroom = mergeable_ctx_to_headroom(ctx); | 692 | unsigned int headroom = mergeable_ctx_to_headroom(ctx); |
692 | bool sent; | 693 | bool sent; |
694 | int err; | ||
693 | 695 | ||
694 | head_skb = NULL; | 696 | head_skb = NULL; |
695 | 697 | ||
@@ -701,7 +703,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, | |||
701 | void *data; | 703 | void *data; |
702 | u32 act; | 704 | u32 act; |
703 | 705 | ||
704 | /* This happens when rx buffer size is underestimated */ | 706 | /* This happens when rx buffer size is underestimated |
707 | * or headroom is not enough because of the buffer | ||
708 | * was refilled before XDP is set. This should only | ||
709 | * happen for the first several packets, so we don't | ||
710 | * care much about its performance. | ||
711 | */ | ||
705 | if (unlikely(num_buf > 1 || | 712 | if (unlikely(num_buf > 1 || |
706 | headroom < virtnet_get_headroom(vi))) { | 713 | headroom < virtnet_get_headroom(vi))) { |
707 | /* linearize data for XDP */ | 714 | /* linearize data for XDP */ |
@@ -736,9 +743,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, | |||
736 | 743 | ||
737 | act = bpf_prog_run_xdp(xdp_prog, &xdp); | 744 | act = bpf_prog_run_xdp(xdp_prog, &xdp); |
738 | 745 | ||
739 | if (act != XDP_PASS) | ||
740 | ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len); | ||
741 | |||
742 | switch (act) { | 746 | switch (act) { |
743 | case XDP_PASS: | 747 | case XDP_PASS: |
744 | /* recalculate offset to account for any header | 748 | /* recalculate offset to account for any header |
@@ -770,6 +774,18 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, | |||
770 | goto err_xdp; | 774 | goto err_xdp; |
771 | rcu_read_unlock(); | 775 | rcu_read_unlock(); |
772 | goto xdp_xmit; | 776 | goto xdp_xmit; |
777 | case XDP_REDIRECT: | ||
778 | err = xdp_do_redirect(dev, &xdp, xdp_prog); | ||
779 | if (err) { | ||
780 | if (unlikely(xdp_page != page)) | ||
781 | put_page(xdp_page); | ||
782 | goto err_xdp; | ||
783 | } | ||
784 | *xdp_xmit = true; | ||
785 | if (unlikely(xdp_page != page)) | ||
786 | goto err_xdp; | ||
787 | rcu_read_unlock(); | ||
788 | goto xdp_xmit; | ||
773 | default: | 789 | default: |
774 | bpf_warn_invalid_xdp_action(act); | 790 | bpf_warn_invalid_xdp_action(act); |
775 | case XDP_ABORTED: | 791 | case XDP_ABORTED: |
@@ -1013,13 +1029,18 @@ static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq, | |||
1013 | } | 1029 | } |
1014 | 1030 | ||
1015 | static unsigned int get_mergeable_buf_len(struct receive_queue *rq, | 1031 | static unsigned int get_mergeable_buf_len(struct receive_queue *rq, |
1016 | struct ewma_pkt_len *avg_pkt_len) | 1032 | struct ewma_pkt_len *avg_pkt_len, |
1033 | unsigned int room) | ||
1017 | { | 1034 | { |
1018 | const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); | 1035 | const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); |
1019 | unsigned int len; | 1036 | unsigned int len; |
1020 | 1037 | ||
1021 | len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len), | 1038 | if (room) |
1039 | return PAGE_SIZE - room; | ||
1040 | |||
1041 | len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len), | ||
1022 | rq->min_buf_len, PAGE_SIZE - hdr_len); | 1042 | rq->min_buf_len, PAGE_SIZE - hdr_len); |
1043 | |||
1023 | return ALIGN(len, L1_CACHE_BYTES); | 1044 | return ALIGN(len, L1_CACHE_BYTES); |
1024 | } | 1045 | } |
1025 | 1046 | ||
@@ -1028,21 +1049,27 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, | |||
1028 | { | 1049 | { |
1029 | struct page_frag *alloc_frag = &rq->alloc_frag; | 1050 | struct page_frag *alloc_frag = &rq->alloc_frag; |
1030 | unsigned int headroom = virtnet_get_headroom(vi); | 1051 | unsigned int headroom = virtnet_get_headroom(vi); |
1052 | unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0; | ||
1053 | unsigned int room = SKB_DATA_ALIGN(headroom + tailroom); | ||
1031 | char *buf; | 1054 | char *buf; |
1032 | void *ctx; | 1055 | void *ctx; |
1033 | int err; | 1056 | int err; |
1034 | unsigned int len, hole; | 1057 | unsigned int len, hole; |
1035 | 1058 | ||
1036 | len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len); | 1059 | /* Extra tailroom is needed to satisfy XDP's assumption. This |
1037 | if (unlikely(!skb_page_frag_refill(len + headroom, alloc_frag, gfp))) | 1060 | * means rx frags coalescing won't work, but consider we've |
1061 | * disabled GSO for XDP, it won't be a big issue. | ||
1062 | */ | ||
1063 | len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room); | ||
1064 | if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp))) | ||
1038 | return -ENOMEM; | 1065 | return -ENOMEM; |
1039 | 1066 | ||
1040 | buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; | 1067 | buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; |
1041 | buf += headroom; /* advance address leaving hole at front of pkt */ | 1068 | buf += headroom; /* advance address leaving hole at front of pkt */ |
1042 | get_page(alloc_frag->page); | 1069 | get_page(alloc_frag->page); |
1043 | alloc_frag->offset += len + headroom; | 1070 | alloc_frag->offset += len + room; |
1044 | hole = alloc_frag->size - alloc_frag->offset; | 1071 | hole = alloc_frag->size - alloc_frag->offset; |
1045 | if (hole < len + headroom) { | 1072 | if (hole < len + room) { |
1046 | /* To avoid internal fragmentation, if there is very likely not | 1073 | /* To avoid internal fragmentation, if there is very likely not |
1047 | * enough space for another buffer, add the remaining space to | 1074 | * enough space for another buffer, add the remaining space to |
1048 | * the current buffer. | 1075 | * the current buffer. |
@@ -2185,8 +2212,9 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, | |||
2185 | } | 2212 | } |
2186 | 2213 | ||
2187 | /* Make sure NAPI is not using any XDP TX queues for RX. */ | 2214 | /* Make sure NAPI is not using any XDP TX queues for RX. */ |
2188 | for (i = 0; i < vi->max_queue_pairs; i++) | 2215 | if (netif_running(dev)) |
2189 | napi_disable(&vi->rq[i].napi); | 2216 | for (i = 0; i < vi->max_queue_pairs; i++) |
2217 | napi_disable(&vi->rq[i].napi); | ||
2190 | 2218 | ||
2191 | netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); | 2219 | netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); |
2192 | err = _virtnet_set_queues(vi, curr_qp + xdp_qp); | 2220 | err = _virtnet_set_queues(vi, curr_qp + xdp_qp); |
@@ -2205,7 +2233,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, | |||
2205 | } | 2233 | } |
2206 | if (old_prog) | 2234 | if (old_prog) |
2207 | bpf_prog_put(old_prog); | 2235 | bpf_prog_put(old_prog); |
2208 | virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); | 2236 | if (netif_running(dev)) |
2237 | virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); | ||
2209 | } | 2238 | } |
2210 | 2239 | ||
2211 | return 0; | 2240 | return 0; |
@@ -2576,12 +2605,15 @@ static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue, | |||
2576 | { | 2605 | { |
2577 | struct virtnet_info *vi = netdev_priv(queue->dev); | 2606 | struct virtnet_info *vi = netdev_priv(queue->dev); |
2578 | unsigned int queue_index = get_netdev_rx_queue_index(queue); | 2607 | unsigned int queue_index = get_netdev_rx_queue_index(queue); |
2608 | unsigned int headroom = virtnet_get_headroom(vi); | ||
2609 | unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0; | ||
2579 | struct ewma_pkt_len *avg; | 2610 | struct ewma_pkt_len *avg; |
2580 | 2611 | ||
2581 | BUG_ON(queue_index >= vi->max_queue_pairs); | 2612 | BUG_ON(queue_index >= vi->max_queue_pairs); |
2582 | avg = &vi->rq[queue_index].mrg_avg_pkt_len; | 2613 | avg = &vi->rq[queue_index].mrg_avg_pkt_len; |
2583 | return sprintf(buf, "%u\n", | 2614 | return sprintf(buf, "%u\n", |
2584 | get_mergeable_buf_len(&vi->rq[queue_index], avg)); | 2615 | get_mergeable_buf_len(&vi->rq[queue_index], avg, |
2616 | SKB_DATA_ALIGN(headroom + tailroom))); | ||
2585 | } | 2617 | } |
2586 | 2618 | ||
2587 | static struct rx_queue_attribute mergeable_rx_buffer_size_attribute = | 2619 | static struct rx_queue_attribute mergeable_rx_buffer_size_attribute = |
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index afeca6bcdade..ab8b3cbbb205 100644 --- a/drivers/net/wan/hdlc_ppp.c +++ b/drivers/net/wan/hdlc_ppp.c | |||
@@ -574,7 +574,10 @@ static void ppp_timer(struct timer_list *t) | |||
574 | ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0, | 574 | ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0, |
575 | 0, NULL); | 575 | 0, NULL); |
576 | proto->restart_counter--; | 576 | proto->restart_counter--; |
577 | } else | 577 | } else if (netif_carrier_ok(proto->dev)) |
578 | ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0, | ||
579 | 0, NULL); | ||
580 | else | ||
578 | ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0, | 581 | ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0, |
579 | 0, NULL); | 582 | 0, NULL); |
580 | break; | 583 | break; |
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 8328d395e332..3127bc8633ca 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c | |||
@@ -2005,7 +2005,10 @@ static void netback_changed(struct xenbus_device *dev, | |||
2005 | case XenbusStateInitialised: | 2005 | case XenbusStateInitialised: |
2006 | case XenbusStateReconfiguring: | 2006 | case XenbusStateReconfiguring: |
2007 | case XenbusStateReconfigured: | 2007 | case XenbusStateReconfigured: |
2008 | break; | ||
2009 | |||
2008 | case XenbusStateUnknown: | 2010 | case XenbusStateUnknown: |
2011 | wake_up_all(&module_unload_q); | ||
2009 | break; | 2012 | break; |
2010 | 2013 | ||
2011 | case XenbusStateInitWait: | 2014 | case XenbusStateInitWait: |
@@ -2136,7 +2139,9 @@ static int xennet_remove(struct xenbus_device *dev) | |||
2136 | xenbus_switch_state(dev, XenbusStateClosing); | 2139 | xenbus_switch_state(dev, XenbusStateClosing); |
2137 | wait_event(module_unload_q, | 2140 | wait_event(module_unload_q, |
2138 | xenbus_read_driver_state(dev->otherend) == | 2141 | xenbus_read_driver_state(dev->otherend) == |
2139 | XenbusStateClosing); | 2142 | XenbusStateClosing || |
2143 | xenbus_read_driver_state(dev->otherend) == | ||
2144 | XenbusStateUnknown); | ||
2140 | 2145 | ||
2141 | xenbus_switch_state(dev, XenbusStateClosed); | 2146 | xenbus_switch_state(dev, XenbusStateClosed); |
2142 | wait_event(module_unload_q, | 2147 | wait_event(module_unload_q, |
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 10041ac4032c..06f8dcc52ca6 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c | |||
@@ -335,8 +335,7 @@ static int pmem_attach_disk(struct device *dev, | |||
335 | dev_warn(dev, "unable to guarantee persistence of writes\n"); | 335 | dev_warn(dev, "unable to guarantee persistence of writes\n"); |
336 | fua = 0; | 336 | fua = 0; |
337 | } | 337 | } |
338 | wbc = nvdimm_has_cache(nd_region) && | 338 | wbc = nvdimm_has_cache(nd_region); |
339 | !test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags); | ||
340 | 339 | ||
341 | if (!devm_request_mem_region(dev, res->start, resource_size(res), | 340 | if (!devm_request_mem_region(dev, res->start, resource_size(res), |
342 | dev_name(&ndns->dev))) { | 341 | dev_name(&ndns->dev))) { |
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 0fe7ea35c221..7aeca5db7916 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c | |||
@@ -2844,7 +2844,7 @@ out: | |||
2844 | } | 2844 | } |
2845 | 2845 | ||
2846 | static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid, | 2846 | static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid, |
2847 | struct nvme_id_ns *id, bool *new) | 2847 | struct nvme_id_ns *id) |
2848 | { | 2848 | { |
2849 | struct nvme_ctrl *ctrl = ns->ctrl; | 2849 | struct nvme_ctrl *ctrl = ns->ctrl; |
2850 | bool is_shared = id->nmic & (1 << 0); | 2850 | bool is_shared = id->nmic & (1 << 0); |
@@ -2860,8 +2860,6 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid, | |||
2860 | ret = PTR_ERR(head); | 2860 | ret = PTR_ERR(head); |
2861 | goto out_unlock; | 2861 | goto out_unlock; |
2862 | } | 2862 | } |
2863 | |||
2864 | *new = true; | ||
2865 | } else { | 2863 | } else { |
2866 | struct nvme_ns_ids ids; | 2864 | struct nvme_ns_ids ids; |
2867 | 2865 | ||
@@ -2873,8 +2871,6 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid, | |||
2873 | ret = -EINVAL; | 2871 | ret = -EINVAL; |
2874 | goto out_unlock; | 2872 | goto out_unlock; |
2875 | } | 2873 | } |
2876 | |||
2877 | *new = false; | ||
2878 | } | 2874 | } |
2879 | 2875 | ||
2880 | list_add_tail(&ns->siblings, &head->list); | 2876 | list_add_tail(&ns->siblings, &head->list); |
@@ -2945,7 +2941,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) | |||
2945 | struct nvme_id_ns *id; | 2941 | struct nvme_id_ns *id; |
2946 | char disk_name[DISK_NAME_LEN]; | 2942 | char disk_name[DISK_NAME_LEN]; |
2947 | int node = dev_to_node(ctrl->dev), flags = GENHD_FL_EXT_DEVT; | 2943 | int node = dev_to_node(ctrl->dev), flags = GENHD_FL_EXT_DEVT; |
2948 | bool new = true; | ||
2949 | 2944 | ||
2950 | ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node); | 2945 | ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node); |
2951 | if (!ns) | 2946 | if (!ns) |
@@ -2971,7 +2966,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) | |||
2971 | if (id->ncap == 0) | 2966 | if (id->ncap == 0) |
2972 | goto out_free_id; | 2967 | goto out_free_id; |
2973 | 2968 | ||
2974 | if (nvme_init_ns_head(ns, nsid, id, &new)) | 2969 | if (nvme_init_ns_head(ns, nsid, id)) |
2975 | goto out_free_id; | 2970 | goto out_free_id; |
2976 | nvme_setup_streams_ns(ctrl, ns); | 2971 | nvme_setup_streams_ns(ctrl, ns); |
2977 | 2972 | ||
@@ -3037,9 +3032,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) | |||
3037 | pr_warn("%s: failed to register lightnvm sysfs group for identification\n", | 3032 | pr_warn("%s: failed to register lightnvm sysfs group for identification\n", |
3038 | ns->disk->disk_name); | 3033 | ns->disk->disk_name); |
3039 | 3034 | ||
3040 | if (new) | 3035 | nvme_mpath_add_disk(ns->head); |
3041 | nvme_mpath_add_disk(ns->head); | ||
3042 | nvme_mpath_add_disk_links(ns); | ||
3043 | return; | 3036 | return; |
3044 | out_unlink_ns: | 3037 | out_unlink_ns: |
3045 | mutex_lock(&ctrl->subsys->lock); | 3038 | mutex_lock(&ctrl->subsys->lock); |
@@ -3059,7 +3052,6 @@ static void nvme_ns_remove(struct nvme_ns *ns) | |||
3059 | return; | 3052 | return; |
3060 | 3053 | ||
3061 | if (ns->disk && ns->disk->flags & GENHD_FL_UP) { | 3054 | if (ns->disk && ns->disk->flags & GENHD_FL_UP) { |
3062 | nvme_mpath_remove_disk_links(ns); | ||
3063 | sysfs_remove_group(&disk_to_dev(ns->disk)->kobj, | 3055 | sysfs_remove_group(&disk_to_dev(ns->disk)->kobj, |
3064 | &nvme_ns_id_attr_group); | 3056 | &nvme_ns_id_attr_group); |
3065 | if (ns->ndev) | 3057 | if (ns->ndev) |
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index 5dd4ceefed8f..8f0f34d06d46 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c | |||
@@ -493,7 +493,7 @@ EXPORT_SYMBOL_GPL(nvmf_should_reconnect); | |||
493 | */ | 493 | */ |
494 | int nvmf_register_transport(struct nvmf_transport_ops *ops) | 494 | int nvmf_register_transport(struct nvmf_transport_ops *ops) |
495 | { | 495 | { |
496 | if (!ops->create_ctrl || !ops->module) | 496 | if (!ops->create_ctrl) |
497 | return -EINVAL; | 497 | return -EINVAL; |
498 | 498 | ||
499 | down_write(&nvmf_transports_rwsem); | 499 | down_write(&nvmf_transports_rwsem); |
@@ -650,6 +650,11 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, | |||
650 | ret = -EINVAL; | 650 | ret = -EINVAL; |
651 | goto out; | 651 | goto out; |
652 | } | 652 | } |
653 | if (opts->discovery_nqn) { | ||
654 | pr_debug("Ignoring nr_io_queues value for discovery controller\n"); | ||
655 | break; | ||
656 | } | ||
657 | |||
653 | opts->nr_io_queues = min_t(unsigned int, | 658 | opts->nr_io_queues = min_t(unsigned int, |
654 | num_online_cpus(), token); | 659 | num_online_cpus(), token); |
655 | break; | 660 | break; |
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 7f51f8414b97..1dc1387b7134 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c | |||
@@ -1206,7 +1206,7 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl, | |||
1206 | sizeof(struct fcnvme_lsdesc_cr_assoc_cmd)); | 1206 | sizeof(struct fcnvme_lsdesc_cr_assoc_cmd)); |
1207 | 1207 | ||
1208 | assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio); | 1208 | assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio); |
1209 | assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize); | 1209 | assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1); |
1210 | /* Linux supports only Dynamic controllers */ | 1210 | /* Linux supports only Dynamic controllers */ |
1211 | assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff); | 1211 | assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff); |
1212 | uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id); | 1212 | uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id); |
@@ -1321,7 +1321,7 @@ nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, | |||
1321 | sizeof(struct fcnvme_lsdesc_cr_conn_cmd)); | 1321 | sizeof(struct fcnvme_lsdesc_cr_conn_cmd)); |
1322 | conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio); | 1322 | conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio); |
1323 | conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum); | 1323 | conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum); |
1324 | conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize); | 1324 | conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1); |
1325 | 1325 | ||
1326 | lsop->queue = queue; | 1326 | lsop->queue = queue; |
1327 | lsreq->rqstaddr = conn_rqst; | 1327 | lsreq->rqstaddr = conn_rqst; |
@@ -2481,11 +2481,11 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl) | |||
2481 | goto out_free_tag_set; | 2481 | goto out_free_tag_set; |
2482 | } | 2482 | } |
2483 | 2483 | ||
2484 | ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size); | 2484 | ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1); |
2485 | if (ret) | 2485 | if (ret) |
2486 | goto out_cleanup_blk_queue; | 2486 | goto out_cleanup_blk_queue; |
2487 | 2487 | ||
2488 | ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size); | 2488 | ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1); |
2489 | if (ret) | 2489 | if (ret) |
2490 | goto out_delete_hw_queues; | 2490 | goto out_delete_hw_queues; |
2491 | 2491 | ||
@@ -2532,11 +2532,11 @@ nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl) | |||
2532 | if (ret) | 2532 | if (ret) |
2533 | goto out_free_io_queues; | 2533 | goto out_free_io_queues; |
2534 | 2534 | ||
2535 | ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size); | 2535 | ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1); |
2536 | if (ret) | 2536 | if (ret) |
2537 | goto out_free_io_queues; | 2537 | goto out_free_io_queues; |
2538 | 2538 | ||
2539 | ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size); | 2539 | ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1); |
2540 | if (ret) | 2540 | if (ret) |
2541 | goto out_delete_hw_queues; | 2541 | goto out_delete_hw_queues; |
2542 | 2542 | ||
@@ -2632,13 +2632,12 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) | |||
2632 | nvme_fc_init_queue(ctrl, 0); | 2632 | nvme_fc_init_queue(ctrl, 0); |
2633 | 2633 | ||
2634 | ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0, | 2634 | ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0, |
2635 | NVME_AQ_BLK_MQ_DEPTH); | 2635 | NVME_AQ_DEPTH); |
2636 | if (ret) | 2636 | if (ret) |
2637 | goto out_free_queue; | 2637 | goto out_free_queue; |
2638 | 2638 | ||
2639 | ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0], | 2639 | ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0], |
2640 | NVME_AQ_BLK_MQ_DEPTH, | 2640 | NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4)); |
2641 | (NVME_AQ_BLK_MQ_DEPTH / 4)); | ||
2642 | if (ret) | 2641 | if (ret) |
2643 | goto out_delete_hw_queue; | 2642 | goto out_delete_hw_queue; |
2644 | 2643 | ||
@@ -2666,7 +2665,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) | |||
2666 | } | 2665 | } |
2667 | 2666 | ||
2668 | ctrl->ctrl.sqsize = | 2667 | ctrl->ctrl.sqsize = |
2669 | min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap) + 1, ctrl->ctrl.sqsize); | 2668 | min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize); |
2670 | 2669 | ||
2671 | ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap); | 2670 | ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap); |
2672 | if (ret) | 2671 | if (ret) |
@@ -2699,6 +2698,14 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) | |||
2699 | opts->queue_size = ctrl->ctrl.maxcmd; | 2698 | opts->queue_size = ctrl->ctrl.maxcmd; |
2700 | } | 2699 | } |
2701 | 2700 | ||
2701 | if (opts->queue_size > ctrl->ctrl.sqsize + 1) { | ||
2702 | /* warn if sqsize is lower than queue_size */ | ||
2703 | dev_warn(ctrl->ctrl.device, | ||
2704 | "queue_size %zu > ctrl sqsize %u, clamping down\n", | ||
2705 | opts->queue_size, ctrl->ctrl.sqsize + 1); | ||
2706 | opts->queue_size = ctrl->ctrl.sqsize + 1; | ||
2707 | } | ||
2708 | |||
2702 | ret = nvme_fc_init_aen_ops(ctrl); | 2709 | ret = nvme_fc_init_aen_ops(ctrl); |
2703 | if (ret) | 2710 | if (ret) |
2704 | goto out_term_aen_ops; | 2711 | goto out_term_aen_ops; |
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 3b211d9e58b8..060f69e03427 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c | |||
@@ -198,30 +198,16 @@ void nvme_mpath_add_disk(struct nvme_ns_head *head) | |||
198 | { | 198 | { |
199 | if (!head->disk) | 199 | if (!head->disk) |
200 | return; | 200 | return; |
201 | device_add_disk(&head->subsys->dev, head->disk); | ||
202 | if (sysfs_create_group(&disk_to_dev(head->disk)->kobj, | ||
203 | &nvme_ns_id_attr_group)) | ||
204 | pr_warn("%s: failed to create sysfs group for identification\n", | ||
205 | head->disk->disk_name); | ||
206 | } | ||
207 | |||
208 | void nvme_mpath_add_disk_links(struct nvme_ns *ns) | ||
209 | { | ||
210 | struct kobject *slave_disk_kobj, *holder_disk_kobj; | ||
211 | |||
212 | if (!ns->head->disk) | ||
213 | return; | ||
214 | |||
215 | slave_disk_kobj = &disk_to_dev(ns->disk)->kobj; | ||
216 | if (sysfs_create_link(ns->head->disk->slave_dir, slave_disk_kobj, | ||
217 | kobject_name(slave_disk_kobj))) | ||
218 | return; | ||
219 | 201 | ||
220 | holder_disk_kobj = &disk_to_dev(ns->head->disk)->kobj; | 202 | mutex_lock(&head->subsys->lock); |
221 | if (sysfs_create_link(ns->disk->part0.holder_dir, holder_disk_kobj, | 203 | if (!(head->disk->flags & GENHD_FL_UP)) { |
222 | kobject_name(holder_disk_kobj))) | 204 | device_add_disk(&head->subsys->dev, head->disk); |
223 | sysfs_remove_link(ns->head->disk->slave_dir, | 205 | if (sysfs_create_group(&disk_to_dev(head->disk)->kobj, |
224 | kobject_name(slave_disk_kobj)); | 206 | &nvme_ns_id_attr_group)) |
207 | pr_warn("%s: failed to create sysfs group for identification\n", | ||
208 | head->disk->disk_name); | ||
209 | } | ||
210 | mutex_unlock(&head->subsys->lock); | ||
225 | } | 211 | } |
226 | 212 | ||
227 | void nvme_mpath_remove_disk(struct nvme_ns_head *head) | 213 | void nvme_mpath_remove_disk(struct nvme_ns_head *head) |
@@ -238,14 +224,3 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head) | |||
238 | blk_cleanup_queue(head->disk->queue); | 224 | blk_cleanup_queue(head->disk->queue); |
239 | put_disk(head->disk); | 225 | put_disk(head->disk); |
240 | } | 226 | } |
241 | |||
242 | void nvme_mpath_remove_disk_links(struct nvme_ns *ns) | ||
243 | { | ||
244 | if (!ns->head->disk) | ||
245 | return; | ||
246 | |||
247 | sysfs_remove_link(ns->disk->part0.holder_dir, | ||
248 | kobject_name(&disk_to_dev(ns->head->disk)->kobj)); | ||
249 | sysfs_remove_link(ns->head->disk->slave_dir, | ||
250 | kobject_name(&disk_to_dev(ns->disk)->kobj)); | ||
251 | } | ||
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 0521e4707d1c..d733b14ede9d 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h | |||
@@ -410,9 +410,7 @@ bool nvme_req_needs_failover(struct request *req, blk_status_t error); | |||
410 | void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl); | 410 | void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl); |
411 | int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head); | 411 | int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head); |
412 | void nvme_mpath_add_disk(struct nvme_ns_head *head); | 412 | void nvme_mpath_add_disk(struct nvme_ns_head *head); |
413 | void nvme_mpath_add_disk_links(struct nvme_ns *ns); | ||
414 | void nvme_mpath_remove_disk(struct nvme_ns_head *head); | 413 | void nvme_mpath_remove_disk(struct nvme_ns_head *head); |
415 | void nvme_mpath_remove_disk_links(struct nvme_ns *ns); | ||
416 | 414 | ||
417 | static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns) | 415 | static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns) |
418 | { | 416 | { |
@@ -454,12 +452,6 @@ static inline void nvme_mpath_add_disk(struct nvme_ns_head *head) | |||
454 | static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head) | 452 | static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head) |
455 | { | 453 | { |
456 | } | 454 | } |
457 | static inline void nvme_mpath_add_disk_links(struct nvme_ns *ns) | ||
458 | { | ||
459 | } | ||
460 | static inline void nvme_mpath_remove_disk_links(struct nvme_ns *ns) | ||
461 | { | ||
462 | } | ||
463 | static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns) | 455 | static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns) |
464 | { | 456 | { |
465 | } | 457 | } |
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 73036d2fbbd5..b6f43b738f03 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
@@ -1153,12 +1153,6 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts) | |||
1153 | if (!(csts & NVME_CSTS_CFS) && !nssro) | 1153 | if (!(csts & NVME_CSTS_CFS) && !nssro) |
1154 | return false; | 1154 | return false; |
1155 | 1155 | ||
1156 | /* If PCI error recovery process is happening, we cannot reset or | ||
1157 | * the recovery mechanism will surely fail. | ||
1158 | */ | ||
1159 | if (pci_channel_offline(to_pci_dev(dev->dev))) | ||
1160 | return false; | ||
1161 | |||
1162 | return true; | 1156 | return true; |
1163 | } | 1157 | } |
1164 | 1158 | ||
@@ -1189,6 +1183,13 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) | |||
1189 | struct nvme_command cmd; | 1183 | struct nvme_command cmd; |
1190 | u32 csts = readl(dev->bar + NVME_REG_CSTS); | 1184 | u32 csts = readl(dev->bar + NVME_REG_CSTS); |
1191 | 1185 | ||
1186 | /* If PCI error recovery process is happening, we cannot reset or | ||
1187 | * the recovery mechanism will surely fail. | ||
1188 | */ | ||
1189 | mb(); | ||
1190 | if (pci_channel_offline(to_pci_dev(dev->dev))) | ||
1191 | return BLK_EH_RESET_TIMER; | ||
1192 | |||
1192 | /* | 1193 | /* |
1193 | * Reset immediately if the controller is failed | 1194 | * Reset immediately if the controller is failed |
1194 | */ | 1195 | */ |
@@ -1459,7 +1460,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid) | |||
1459 | nvmeq->cq_vector = qid - 1; | 1460 | nvmeq->cq_vector = qid - 1; |
1460 | result = adapter_alloc_cq(dev, qid, nvmeq); | 1461 | result = adapter_alloc_cq(dev, qid, nvmeq); |
1461 | if (result < 0) | 1462 | if (result < 0) |
1462 | return result; | 1463 | goto release_vector; |
1463 | 1464 | ||
1464 | result = adapter_alloc_sq(dev, qid, nvmeq); | 1465 | result = adapter_alloc_sq(dev, qid, nvmeq); |
1465 | if (result < 0) | 1466 | if (result < 0) |
@@ -1473,9 +1474,12 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid) | |||
1473 | return result; | 1474 | return result; |
1474 | 1475 | ||
1475 | release_sq: | 1476 | release_sq: |
1477 | dev->online_queues--; | ||
1476 | adapter_delete_sq(dev, qid); | 1478 | adapter_delete_sq(dev, qid); |
1477 | release_cq: | 1479 | release_cq: |
1478 | adapter_delete_cq(dev, qid); | 1480 | adapter_delete_cq(dev, qid); |
1481 | release_vector: | ||
1482 | nvmeq->cq_vector = -1; | ||
1479 | return result; | 1483 | return result; |
1480 | } | 1484 | } |
1481 | 1485 | ||
@@ -1910,7 +1914,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) | |||
1910 | int result, nr_io_queues; | 1914 | int result, nr_io_queues; |
1911 | unsigned long size; | 1915 | unsigned long size; |
1912 | 1916 | ||
1913 | nr_io_queues = num_present_cpus(); | 1917 | nr_io_queues = num_possible_cpus(); |
1914 | result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues); | 1918 | result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues); |
1915 | if (result < 0) | 1919 | if (result < 0) |
1916 | return result; | 1920 | return result; |
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 3a51ed50eff2..4d84a73ee12d 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c | |||
@@ -1051,7 +1051,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue, | |||
1051 | struct nvme_rdma_device *dev = queue->device; | 1051 | struct nvme_rdma_device *dev = queue->device; |
1052 | struct ib_device *ibdev = dev->dev; | 1052 | struct ib_device *ibdev = dev->dev; |
1053 | 1053 | ||
1054 | if (!blk_rq_bytes(rq)) | 1054 | if (!blk_rq_payload_bytes(rq)) |
1055 | return; | 1055 | return; |
1056 | 1056 | ||
1057 | if (req->mr) { | 1057 | if (req->mr) { |
@@ -1166,7 +1166,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue, | |||
1166 | 1166 | ||
1167 | c->common.flags |= NVME_CMD_SGL_METABUF; | 1167 | c->common.flags |= NVME_CMD_SGL_METABUF; |
1168 | 1168 | ||
1169 | if (!blk_rq_bytes(rq)) | 1169 | if (!blk_rq_payload_bytes(rq)) |
1170 | return nvme_rdma_set_sg_null(c); | 1170 | return nvme_rdma_set_sg_null(c); |
1171 | 1171 | ||
1172 | req->sg_table.sgl = req->first_sgl; | 1172 | req->sg_table.sgl = req->first_sgl; |
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 0bd737117a80..a78029e4e5f4 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c | |||
@@ -520,9 +520,12 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, | |||
520 | goto fail; | 520 | goto fail; |
521 | } | 521 | } |
522 | 522 | ||
523 | /* either variant of SGLs is fine, as we don't support metadata */ | 523 | /* |
524 | if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF && | 524 | * For fabrics, PSDT field shall describe metadata pointer (MPTR) that |
525 | (flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METASEG)) { | 525 | * contains an address of a single contiguous physical buffer that is |
526 | * byte aligned. | ||
527 | */ | ||
528 | if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) { | ||
526 | status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; | 529 | status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; |
527 | goto fail; | 530 | goto fail; |
528 | } | 531 | } |
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index 7991ec3a17db..861d1509b22b 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c | |||
@@ -184,7 +184,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
184 | return BLK_STS_OK; | 184 | return BLK_STS_OK; |
185 | } | 185 | } |
186 | 186 | ||
187 | if (blk_rq_bytes(req)) { | 187 | if (blk_rq_payload_bytes(req)) { |
188 | iod->sg_table.sgl = iod->first_sgl; | 188 | iod->sg_table.sgl = iod->first_sgl; |
189 | if (sg_alloc_table_chained(&iod->sg_table, | 189 | if (sg_alloc_table_chained(&iod->sg_table, |
190 | blk_rq_nr_phys_segments(req), | 190 | blk_rq_nr_phys_segments(req), |
@@ -193,7 +193,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
193 | 193 | ||
194 | iod->req.sg = iod->sg_table.sgl; | 194 | iod->req.sg = iod->sg_table.sgl; |
195 | iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl); | 195 | iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl); |
196 | iod->req.transfer_len = blk_rq_bytes(req); | 196 | iod->req.transfer_len = blk_rq_payload_bytes(req); |
197 | } | 197 | } |
198 | 198 | ||
199 | blk_mq_start_request(req); | 199 | blk_mq_start_request(req); |
diff --git a/drivers/pci/dwc/pcie-designware-host.c b/drivers/pci/dwc/pcie-designware-host.c index 8de2d5c69b1d..dc9303abda42 100644 --- a/drivers/pci/dwc/pcie-designware-host.c +++ b/drivers/pci/dwc/pcie-designware-host.c | |||
@@ -613,7 +613,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp) | |||
613 | /* setup bus numbers */ | 613 | /* setup bus numbers */ |
614 | val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS); | 614 | val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS); |
615 | val &= 0xff000000; | 615 | val &= 0xff000000; |
616 | val |= 0x00010100; | 616 | val |= 0x00ff0100; |
617 | dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val); | 617 | dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val); |
618 | 618 | ||
619 | /* setup command register */ | 619 | /* setup command register */ |
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 369d48d6c6f1..365447240d95 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c | |||
@@ -401,6 +401,10 @@ void pci_release_resource(struct pci_dev *dev, int resno) | |||
401 | struct resource *res = dev->resource + resno; | 401 | struct resource *res = dev->resource + resno; |
402 | 402 | ||
403 | pci_info(dev, "BAR %d: releasing %pR\n", resno, res); | 403 | pci_info(dev, "BAR %d: releasing %pR\n", resno, res); |
404 | |||
405 | if (!res->parent) | ||
406 | return; | ||
407 | |||
404 | release_resource(res); | 408 | release_resource(res); |
405 | res->end = resource_size(res) - 1; | 409 | res->end = resource_size(res) - 1; |
406 | res->start = 0; | 410 | res->start = 0; |
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index 0c2ed11c0603..f63db346c219 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c | |||
@@ -638,7 +638,7 @@ static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node) | |||
638 | if (irq_is_percpu_devid(irq)) | 638 | if (irq_is_percpu_devid(irq)) |
639 | disable_percpu_irq(irq); | 639 | disable_percpu_irq(irq); |
640 | else | 640 | else |
641 | disable_irq(irq); | 641 | disable_irq_nosync(irq); |
642 | } | 642 | } |
643 | 643 | ||
644 | per_cpu(cpu_armpmu, cpu) = NULL; | 644 | per_cpu(cpu_armpmu, cpu) = NULL; |
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs.c b/drivers/phy/qualcomm/phy-qcom-ufs.c index c5ff4525edef..c5493ea51282 100644 --- a/drivers/phy/qualcomm/phy-qcom-ufs.c +++ b/drivers/phy/qualcomm/phy-qcom-ufs.c | |||
@@ -675,3 +675,8 @@ int ufs_qcom_phy_power_off(struct phy *generic_phy) | |||
675 | return 0; | 675 | return 0; |
676 | } | 676 | } |
677 | EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_off); | 677 | EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_off); |
678 | |||
679 | MODULE_AUTHOR("Yaniv Gardi <ygardi@codeaurora.org>"); | ||
680 | MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>"); | ||
681 | MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY"); | ||
682 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg.c b/drivers/pinctrl/meson/pinctrl-meson-axg.c index 1fda9d6c7ea3..4b91ff74779b 100644 --- a/drivers/pinctrl/meson/pinctrl-meson-axg.c +++ b/drivers/pinctrl/meson/pinctrl-meson-axg.c | |||
@@ -716,7 +716,7 @@ static const char * const uart_b_groups[] = { | |||
716 | "uart_tx_b_x", "uart_rx_b_x", "uart_cts_b_x", "uart_rts_b_x", | 716 | "uart_tx_b_x", "uart_rx_b_x", "uart_cts_b_x", "uart_rts_b_x", |
717 | }; | 717 | }; |
718 | 718 | ||
719 | static const char * const uart_ao_b_gpioz_groups[] = { | 719 | static const char * const uart_ao_b_z_groups[] = { |
720 | "uart_ao_tx_b_z", "uart_ao_rx_b_z", | 720 | "uart_ao_tx_b_z", "uart_ao_rx_b_z", |
721 | "uart_ao_cts_b_z", "uart_ao_rts_b_z", | 721 | "uart_ao_cts_b_z", "uart_ao_rts_b_z", |
722 | }; | 722 | }; |
@@ -855,7 +855,7 @@ static struct meson_pmx_func meson_axg_periphs_functions[] = { | |||
855 | FUNCTION(nand), | 855 | FUNCTION(nand), |
856 | FUNCTION(uart_a), | 856 | FUNCTION(uart_a), |
857 | FUNCTION(uart_b), | 857 | FUNCTION(uart_b), |
858 | FUNCTION(uart_ao_b_gpioz), | 858 | FUNCTION(uart_ao_b_z), |
859 | FUNCTION(i2c0), | 859 | FUNCTION(i2c0), |
860 | FUNCTION(i2c1), | 860 | FUNCTION(i2c1), |
861 | FUNCTION(i2c2), | 861 | FUNCTION(i2c2), |
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c index 6dec6ab13300..d8599736a41a 100644 --- a/drivers/platform/chrome/chromeos_laptop.c +++ b/drivers/platform/chrome/chromeos_laptop.c | |||
@@ -423,7 +423,7 @@ static int chromeos_laptop_probe(struct platform_device *pdev) | |||
423 | return ret; | 423 | return ret; |
424 | } | 424 | } |
425 | 425 | ||
426 | static const struct chromeos_laptop samsung_series_5_550 = { | 426 | static struct chromeos_laptop samsung_series_5_550 = { |
427 | .i2c_peripherals = { | 427 | .i2c_peripherals = { |
428 | /* Touchpad. */ | 428 | /* Touchpad. */ |
429 | { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS }, | 429 | { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS }, |
@@ -432,14 +432,14 @@ static const struct chromeos_laptop samsung_series_5_550 = { | |||
432 | }, | 432 | }, |
433 | }; | 433 | }; |
434 | 434 | ||
435 | static const struct chromeos_laptop samsung_series_5 = { | 435 | static struct chromeos_laptop samsung_series_5 = { |
436 | .i2c_peripherals = { | 436 | .i2c_peripherals = { |
437 | /* Light Sensor. */ | 437 | /* Light Sensor. */ |
438 | { .add = setup_tsl2583_als, I2C_ADAPTER_SMBUS }, | 438 | { .add = setup_tsl2583_als, I2C_ADAPTER_SMBUS }, |
439 | }, | 439 | }, |
440 | }; | 440 | }; |
441 | 441 | ||
442 | static const struct chromeos_laptop chromebook_pixel = { | 442 | static struct chromeos_laptop chromebook_pixel = { |
443 | .i2c_peripherals = { | 443 | .i2c_peripherals = { |
444 | /* Touch Screen. */ | 444 | /* Touch Screen. */ |
445 | { .add = setup_atmel_1664s_ts, I2C_ADAPTER_PANEL }, | 445 | { .add = setup_atmel_1664s_ts, I2C_ADAPTER_PANEL }, |
@@ -450,14 +450,14 @@ static const struct chromeos_laptop chromebook_pixel = { | |||
450 | }, | 450 | }, |
451 | }; | 451 | }; |
452 | 452 | ||
453 | static const struct chromeos_laptop hp_chromebook_14 = { | 453 | static struct chromeos_laptop hp_chromebook_14 = { |
454 | .i2c_peripherals = { | 454 | .i2c_peripherals = { |
455 | /* Touchpad. */ | 455 | /* Touchpad. */ |
456 | { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 }, | 456 | { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 }, |
457 | }, | 457 | }, |
458 | }; | 458 | }; |
459 | 459 | ||
460 | static const struct chromeos_laptop dell_chromebook_11 = { | 460 | static struct chromeos_laptop dell_chromebook_11 = { |
461 | .i2c_peripherals = { | 461 | .i2c_peripherals = { |
462 | /* Touchpad. */ | 462 | /* Touchpad. */ |
463 | { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 }, | 463 | { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 }, |
@@ -466,28 +466,28 @@ static const struct chromeos_laptop dell_chromebook_11 = { | |||
466 | }, | 466 | }, |
467 | }; | 467 | }; |
468 | 468 | ||
469 | static const struct chromeos_laptop toshiba_cb35 = { | 469 | static struct chromeos_laptop toshiba_cb35 = { |
470 | .i2c_peripherals = { | 470 | .i2c_peripherals = { |
471 | /* Touchpad. */ | 471 | /* Touchpad. */ |
472 | { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 }, | 472 | { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 }, |
473 | }, | 473 | }, |
474 | }; | 474 | }; |
475 | 475 | ||
476 | static const struct chromeos_laptop acer_c7_chromebook = { | 476 | static struct chromeos_laptop acer_c7_chromebook = { |
477 | .i2c_peripherals = { | 477 | .i2c_peripherals = { |
478 | /* Touchpad. */ | 478 | /* Touchpad. */ |
479 | { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS }, | 479 | { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS }, |
480 | }, | 480 | }, |
481 | }; | 481 | }; |
482 | 482 | ||
483 | static const struct chromeos_laptop acer_ac700 = { | 483 | static struct chromeos_laptop acer_ac700 = { |
484 | .i2c_peripherals = { | 484 | .i2c_peripherals = { |
485 | /* Light Sensor. */ | 485 | /* Light Sensor. */ |
486 | { .add = setup_tsl2563_als, I2C_ADAPTER_SMBUS }, | 486 | { .add = setup_tsl2563_als, I2C_ADAPTER_SMBUS }, |
487 | }, | 487 | }, |
488 | }; | 488 | }; |
489 | 489 | ||
490 | static const struct chromeos_laptop acer_c720 = { | 490 | static struct chromeos_laptop acer_c720 = { |
491 | .i2c_peripherals = { | 491 | .i2c_peripherals = { |
492 | /* Touchscreen. */ | 492 | /* Touchscreen. */ |
493 | { .add = setup_atmel_1664s_ts, I2C_ADAPTER_DESIGNWARE_1 }, | 493 | { .add = setup_atmel_1664s_ts, I2C_ADAPTER_DESIGNWARE_1 }, |
@@ -500,14 +500,14 @@ static const struct chromeos_laptop acer_c720 = { | |||
500 | }, | 500 | }, |
501 | }; | 501 | }; |
502 | 502 | ||
503 | static const struct chromeos_laptop hp_pavilion_14_chromebook = { | 503 | static struct chromeos_laptop hp_pavilion_14_chromebook = { |
504 | .i2c_peripherals = { | 504 | .i2c_peripherals = { |
505 | /* Touchpad. */ | 505 | /* Touchpad. */ |
506 | { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS }, | 506 | { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS }, |
507 | }, | 507 | }, |
508 | }; | 508 | }; |
509 | 509 | ||
510 | static const struct chromeos_laptop cr48 = { | 510 | static struct chromeos_laptop cr48 = { |
511 | .i2c_peripherals = { | 511 | .i2c_peripherals = { |
512 | /* Light Sensor. */ | 512 | /* Light Sensor. */ |
513 | { .add = setup_tsl2563_als, I2C_ADAPTER_SMBUS }, | 513 | { .add = setup_tsl2563_als, I2C_ADAPTER_SMBUS }, |
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 9a8f96465cdc..51ebc5a6053f 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig | |||
@@ -105,31 +105,45 @@ config ASUS_LAPTOP | |||
105 | 105 | ||
106 | If you have an ACPI-compatible ASUS laptop, say Y or M here. | 106 | If you have an ACPI-compatible ASUS laptop, say Y or M here. |
107 | 107 | ||
108 | # | ||
109 | # The DELL_SMBIOS driver depends on ACPI_WMI and/or DCDBAS if those | ||
110 | # backends are selected. The "depends" line prevents a configuration | ||
111 | # where DELL_SMBIOS=y while either of those dependencies =m. | ||
112 | # | ||
108 | config DELL_SMBIOS | 113 | config DELL_SMBIOS |
109 | tristate | 114 | tristate "Dell SMBIOS driver" |
115 | depends on DCDBAS || DCDBAS=n | ||
116 | depends on ACPI_WMI || ACPI_WMI=n | ||
117 | ---help--- | ||
118 | This provides support for the Dell SMBIOS calling interface. | ||
119 | If you have a Dell computer you should enable this option. | ||
120 | |||
121 | Be sure to select at least one backend for it to work properly. | ||
110 | 122 | ||
111 | config DELL_SMBIOS_WMI | 123 | config DELL_SMBIOS_WMI |
112 | tristate "Dell SMBIOS calling interface (WMI implementation)" | 124 | bool "Dell SMBIOS driver WMI backend" |
125 | default y | ||
113 | depends on ACPI_WMI | 126 | depends on ACPI_WMI |
114 | select DELL_WMI_DESCRIPTOR | 127 | select DELL_WMI_DESCRIPTOR |
115 | select DELL_SMBIOS | 128 | depends on DELL_SMBIOS |
116 | ---help--- | 129 | ---help--- |
117 | This provides an implementation for the Dell SMBIOS calling interface | 130 | This provides an implementation for the Dell SMBIOS calling interface |
118 | communicated over ACPI-WMI. | 131 | communicated over ACPI-WMI. |
119 | 132 | ||
120 | If you have a Dell computer from >2007 you should say Y or M here. | 133 | If you have a Dell computer from >2007 you should say Y here. |
121 | If you aren't sure and this module doesn't work for your computer | 134 | If you aren't sure and this module doesn't work for your computer |
122 | it just won't load. | 135 | it just won't load. |
123 | 136 | ||
124 | config DELL_SMBIOS_SMM | 137 | config DELL_SMBIOS_SMM |
125 | tristate "Dell SMBIOS calling interface (SMM implementation)" | 138 | bool "Dell SMBIOS driver SMM backend" |
139 | default y | ||
126 | depends on DCDBAS | 140 | depends on DCDBAS |
127 | select DELL_SMBIOS | 141 | depends on DELL_SMBIOS |
128 | ---help--- | 142 | ---help--- |
129 | This provides an implementation for the Dell SMBIOS calling interface | 143 | This provides an implementation for the Dell SMBIOS calling interface |
130 | communicated over SMI/SMM. | 144 | communicated over SMI/SMM. |
131 | 145 | ||
132 | If you have a Dell computer from <=2017 you should say Y or M here. | 146 | If you have a Dell computer from <=2017 you should say Y here. |
133 | If you aren't sure and this module doesn't work for your computer | 147 | If you aren't sure and this module doesn't work for your computer |
134 | it just won't load. | 148 | it just won't load. |
135 | 149 | ||
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index c388608ad2a3..2ba6cb795338 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile | |||
@@ -13,8 +13,9 @@ obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o | |||
13 | obj-$(CONFIG_ACPI_CMPC) += classmate-laptop.o | 13 | obj-$(CONFIG_ACPI_CMPC) += classmate-laptop.o |
14 | obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o | 14 | obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o |
15 | obj-$(CONFIG_DELL_SMBIOS) += dell-smbios.o | 15 | obj-$(CONFIG_DELL_SMBIOS) += dell-smbios.o |
16 | obj-$(CONFIG_DELL_SMBIOS_WMI) += dell-smbios-wmi.o | 16 | dell-smbios-objs := dell-smbios-base.o |
17 | obj-$(CONFIG_DELL_SMBIOS_SMM) += dell-smbios-smm.o | 17 | dell-smbios-$(CONFIG_DELL_SMBIOS_WMI) += dell-smbios-wmi.o |
18 | dell-smbios-$(CONFIG_DELL_SMBIOS_SMM) += dell-smbios-smm.o | ||
18 | obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o | 19 | obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o |
19 | obj-$(CONFIG_DELL_WMI) += dell-wmi.o | 20 | obj-$(CONFIG_DELL_WMI) += dell-wmi.o |
20 | obj-$(CONFIG_DELL_WMI_DESCRIPTOR) += dell-wmi-descriptor.o | 21 | obj-$(CONFIG_DELL_WMI_DESCRIPTOR) += dell-wmi-descriptor.o |
diff --git a/drivers/platform/x86/dell-smbios.c b/drivers/platform/x86/dell-smbios-base.c index 8541cde4cb7d..2485c80a9fdd 100644 --- a/drivers/platform/x86/dell-smbios.c +++ b/drivers/platform/x86/dell-smbios-base.c | |||
@@ -36,7 +36,7 @@ static DEFINE_MUTEX(smbios_mutex); | |||
36 | struct smbios_device { | 36 | struct smbios_device { |
37 | struct list_head list; | 37 | struct list_head list; |
38 | struct device *device; | 38 | struct device *device; |
39 | int (*call_fn)(struct calling_interface_buffer *); | 39 | int (*call_fn)(struct calling_interface_buffer *arg); |
40 | }; | 40 | }; |
41 | 41 | ||
42 | struct smbios_call { | 42 | struct smbios_call { |
@@ -352,8 +352,10 @@ static void __init parse_da_table(const struct dmi_header *dm) | |||
352 | struct calling_interface_structure *table = | 352 | struct calling_interface_structure *table = |
353 | container_of(dm, struct calling_interface_structure, header); | 353 | container_of(dm, struct calling_interface_structure, header); |
354 | 354 | ||
355 | /* 4 bytes of table header, plus 7 bytes of Dell header, plus at least | 355 | /* |
356 | 6 bytes of entry */ | 356 | * 4 bytes of table header, plus 7 bytes of Dell header |
357 | * plus at least 6 bytes of entry | ||
358 | */ | ||
357 | 359 | ||
358 | if (dm->length < 17) | 360 | if (dm->length < 17) |
359 | return; | 361 | return; |
@@ -554,7 +556,7 @@ static void free_group(struct platform_device *pdev) | |||
554 | static int __init dell_smbios_init(void) | 556 | static int __init dell_smbios_init(void) |
555 | { | 557 | { |
556 | const struct dmi_device *valid; | 558 | const struct dmi_device *valid; |
557 | int ret; | 559 | int ret, wmi, smm; |
558 | 560 | ||
559 | valid = dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "Dell System", NULL); | 561 | valid = dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "Dell System", NULL); |
560 | if (!valid) { | 562 | if (!valid) { |
@@ -589,8 +591,24 @@ static int __init dell_smbios_init(void) | |||
589 | if (ret) | 591 | if (ret) |
590 | goto fail_create_group; | 592 | goto fail_create_group; |
591 | 593 | ||
594 | /* register backends */ | ||
595 | wmi = init_dell_smbios_wmi(); | ||
596 | if (wmi) | ||
597 | pr_debug("Failed to initialize WMI backend: %d\n", wmi); | ||
598 | smm = init_dell_smbios_smm(); | ||
599 | if (smm) | ||
600 | pr_debug("Failed to initialize SMM backend: %d\n", smm); | ||
601 | if (wmi && smm) { | ||
602 | pr_err("No SMBIOS backends available (wmi: %d, smm: %d)\n", | ||
603 | wmi, smm); | ||
604 | goto fail_sysfs; | ||
605 | } | ||
606 | |||
592 | return 0; | 607 | return 0; |
593 | 608 | ||
609 | fail_sysfs: | ||
610 | free_group(platform_device); | ||
611 | |||
594 | fail_create_group: | 612 | fail_create_group: |
595 | platform_device_del(platform_device); | 613 | platform_device_del(platform_device); |
596 | 614 | ||
@@ -607,6 +625,8 @@ fail_platform_driver: | |||
607 | 625 | ||
608 | static void __exit dell_smbios_exit(void) | 626 | static void __exit dell_smbios_exit(void) |
609 | { | 627 | { |
628 | exit_dell_smbios_wmi(); | ||
629 | exit_dell_smbios_smm(); | ||
610 | mutex_lock(&smbios_mutex); | 630 | mutex_lock(&smbios_mutex); |
611 | if (platform_device) { | 631 | if (platform_device) { |
612 | free_group(platform_device); | 632 | free_group(platform_device); |
@@ -617,11 +637,12 @@ static void __exit dell_smbios_exit(void) | |||
617 | mutex_unlock(&smbios_mutex); | 637 | mutex_unlock(&smbios_mutex); |
618 | } | 638 | } |
619 | 639 | ||
620 | subsys_initcall(dell_smbios_init); | 640 | module_init(dell_smbios_init); |
621 | module_exit(dell_smbios_exit); | 641 | module_exit(dell_smbios_exit); |
622 | 642 | ||
623 | MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>"); | 643 | MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>"); |
624 | MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>"); | 644 | MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>"); |
625 | MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>"); | 645 | MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>"); |
646 | MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>"); | ||
626 | MODULE_DESCRIPTION("Common functions for kernel modules using Dell SMBIOS"); | 647 | MODULE_DESCRIPTION("Common functions for kernel modules using Dell SMBIOS"); |
627 | MODULE_LICENSE("GPL"); | 648 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/platform/x86/dell-smbios-smm.c b/drivers/platform/x86/dell-smbios-smm.c index 89f65c4651a0..e9e9da556318 100644 --- a/drivers/platform/x86/dell-smbios-smm.c +++ b/drivers/platform/x86/dell-smbios-smm.c | |||
@@ -58,7 +58,7 @@ static const struct dmi_system_id dell_device_table[] __initconst = { | |||
58 | }; | 58 | }; |
59 | MODULE_DEVICE_TABLE(dmi, dell_device_table); | 59 | MODULE_DEVICE_TABLE(dmi, dell_device_table); |
60 | 60 | ||
61 | static void __init parse_da_table(const struct dmi_header *dm) | 61 | static void parse_da_table(const struct dmi_header *dm) |
62 | { | 62 | { |
63 | struct calling_interface_structure *table = | 63 | struct calling_interface_structure *table = |
64 | container_of(dm, struct calling_interface_structure, header); | 64 | container_of(dm, struct calling_interface_structure, header); |
@@ -73,7 +73,7 @@ static void __init parse_da_table(const struct dmi_header *dm) | |||
73 | da_command_code = table->cmdIOCode; | 73 | da_command_code = table->cmdIOCode; |
74 | } | 74 | } |
75 | 75 | ||
76 | static void __init find_cmd_address(const struct dmi_header *dm, void *dummy) | 76 | static void find_cmd_address(const struct dmi_header *dm, void *dummy) |
77 | { | 77 | { |
78 | switch (dm->type) { | 78 | switch (dm->type) { |
79 | case 0xda: /* Calling interface */ | 79 | case 0xda: /* Calling interface */ |
@@ -128,7 +128,7 @@ static bool test_wsmt_enabled(void) | |||
128 | return false; | 128 | return false; |
129 | } | 129 | } |
130 | 130 | ||
131 | static int __init dell_smbios_smm_init(void) | 131 | int init_dell_smbios_smm(void) |
132 | { | 132 | { |
133 | int ret; | 133 | int ret; |
134 | /* | 134 | /* |
@@ -176,7 +176,7 @@ fail_platform_device_alloc: | |||
176 | return ret; | 176 | return ret; |
177 | } | 177 | } |
178 | 178 | ||
179 | static void __exit dell_smbios_smm_exit(void) | 179 | void exit_dell_smbios_smm(void) |
180 | { | 180 | { |
181 | if (platform_device) { | 181 | if (platform_device) { |
182 | dell_smbios_unregister_device(&platform_device->dev); | 182 | dell_smbios_unregister_device(&platform_device->dev); |
@@ -184,13 +184,3 @@ static void __exit dell_smbios_smm_exit(void) | |||
184 | free_page((unsigned long)buffer); | 184 | free_page((unsigned long)buffer); |
185 | } | 185 | } |
186 | } | 186 | } |
187 | |||
188 | subsys_initcall(dell_smbios_smm_init); | ||
189 | module_exit(dell_smbios_smm_exit); | ||
190 | |||
191 | MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>"); | ||
192 | MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>"); | ||
193 | MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>"); | ||
194 | MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>"); | ||
195 | MODULE_DESCRIPTION("Dell SMBIOS communications over SMI"); | ||
196 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/platform/x86/dell-smbios-wmi.c b/drivers/platform/x86/dell-smbios-wmi.c index 609557aa5868..fbefedb1c172 100644 --- a/drivers/platform/x86/dell-smbios-wmi.c +++ b/drivers/platform/x86/dell-smbios-wmi.c | |||
@@ -228,7 +228,7 @@ static const struct wmi_device_id dell_smbios_wmi_id_table[] = { | |||
228 | { }, | 228 | { }, |
229 | }; | 229 | }; |
230 | 230 | ||
231 | static void __init parse_b1_table(const struct dmi_header *dm) | 231 | static void parse_b1_table(const struct dmi_header *dm) |
232 | { | 232 | { |
233 | struct misc_bios_flags_structure *flags = | 233 | struct misc_bios_flags_structure *flags = |
234 | container_of(dm, struct misc_bios_flags_structure, header); | 234 | container_of(dm, struct misc_bios_flags_structure, header); |
@@ -242,7 +242,7 @@ static void __init parse_b1_table(const struct dmi_header *dm) | |||
242 | wmi_supported = 1; | 242 | wmi_supported = 1; |
243 | } | 243 | } |
244 | 244 | ||
245 | static void __init find_b1(const struct dmi_header *dm, void *dummy) | 245 | static void find_b1(const struct dmi_header *dm, void *dummy) |
246 | { | 246 | { |
247 | switch (dm->type) { | 247 | switch (dm->type) { |
248 | case 0xb1: /* misc bios flags */ | 248 | case 0xb1: /* misc bios flags */ |
@@ -261,7 +261,7 @@ static struct wmi_driver dell_smbios_wmi_driver = { | |||
261 | .filter_callback = dell_smbios_wmi_filter, | 261 | .filter_callback = dell_smbios_wmi_filter, |
262 | }; | 262 | }; |
263 | 263 | ||
264 | static int __init init_dell_smbios_wmi(void) | 264 | int init_dell_smbios_wmi(void) |
265 | { | 265 | { |
266 | dmi_walk(find_b1, NULL); | 266 | dmi_walk(find_b1, NULL); |
267 | 267 | ||
@@ -271,15 +271,9 @@ static int __init init_dell_smbios_wmi(void) | |||
271 | return wmi_driver_register(&dell_smbios_wmi_driver); | 271 | return wmi_driver_register(&dell_smbios_wmi_driver); |
272 | } | 272 | } |
273 | 273 | ||
274 | static void __exit exit_dell_smbios_wmi(void) | 274 | void exit_dell_smbios_wmi(void) |
275 | { | 275 | { |
276 | wmi_driver_unregister(&dell_smbios_wmi_driver); | 276 | wmi_driver_unregister(&dell_smbios_wmi_driver); |
277 | } | 277 | } |
278 | 278 | ||
279 | module_init(init_dell_smbios_wmi); | ||
280 | module_exit(exit_dell_smbios_wmi); | ||
281 | |||
282 | MODULE_ALIAS("wmi:" DELL_WMI_SMBIOS_GUID); | 279 | MODULE_ALIAS("wmi:" DELL_WMI_SMBIOS_GUID); |
283 | MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>"); | ||
284 | MODULE_DESCRIPTION("Dell SMBIOS communications over WMI"); | ||
285 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/platform/x86/dell-smbios.h b/drivers/platform/x86/dell-smbios.h index 138d478d9adc..d8adaf959740 100644 --- a/drivers/platform/x86/dell-smbios.h +++ b/drivers/platform/x86/dell-smbios.h | |||
@@ -75,4 +75,29 @@ int dell_laptop_register_notifier(struct notifier_block *nb); | |||
75 | int dell_laptop_unregister_notifier(struct notifier_block *nb); | 75 | int dell_laptop_unregister_notifier(struct notifier_block *nb); |
76 | void dell_laptop_call_notifier(unsigned long action, void *data); | 76 | void dell_laptop_call_notifier(unsigned long action, void *data); |
77 | 77 | ||
78 | #endif | 78 | /* for the supported backends */ |
79 | #ifdef CONFIG_DELL_SMBIOS_WMI | ||
80 | int init_dell_smbios_wmi(void); | ||
81 | void exit_dell_smbios_wmi(void); | ||
82 | #else /* CONFIG_DELL_SMBIOS_WMI */ | ||
83 | static inline int init_dell_smbios_wmi(void) | ||
84 | { | ||
85 | return -ENODEV; | ||
86 | } | ||
87 | static inline void exit_dell_smbios_wmi(void) | ||
88 | {} | ||
89 | #endif /* CONFIG_DELL_SMBIOS_WMI */ | ||
90 | |||
91 | #ifdef CONFIG_DELL_SMBIOS_SMM | ||
92 | int init_dell_smbios_smm(void); | ||
93 | void exit_dell_smbios_smm(void); | ||
94 | #else /* CONFIG_DELL_SMBIOS_SMM */ | ||
95 | static inline int init_dell_smbios_smm(void) | ||
96 | { | ||
97 | return -ENODEV; | ||
98 | } | ||
99 | static inline void exit_dell_smbios_smm(void) | ||
100 | {} | ||
101 | #endif /* CONFIG_DELL_SMBIOS_SMM */ | ||
102 | |||
103 | #endif /* _DELL_SMBIOS_H_ */ | ||
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c index 2c9927430d85..8d102195a392 100644 --- a/drivers/platform/x86/dell-wmi.c +++ b/drivers/platform/x86/dell-wmi.c | |||
@@ -714,7 +714,7 @@ static int __init dell_wmi_init(void) | |||
714 | 714 | ||
715 | return wmi_driver_register(&dell_wmi_driver); | 715 | return wmi_driver_register(&dell_wmi_driver); |
716 | } | 716 | } |
717 | module_init(dell_wmi_init); | 717 | late_initcall(dell_wmi_init); |
718 | 718 | ||
719 | static void __exit dell_wmi_exit(void) | 719 | static void __exit dell_wmi_exit(void) |
720 | { | 720 | { |
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c index d1a01311c1a2..5e3df194723e 100644 --- a/drivers/platform/x86/intel-hid.c +++ b/drivers/platform/x86/intel-hid.c | |||
@@ -376,6 +376,7 @@ static int intel_hid_remove(struct platform_device *device) | |||
376 | { | 376 | { |
377 | acpi_handle handle = ACPI_HANDLE(&device->dev); | 377 | acpi_handle handle = ACPI_HANDLE(&device->dev); |
378 | 378 | ||
379 | device_init_wakeup(&device->dev, false); | ||
379 | acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler); | 380 | acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler); |
380 | intel_hid_set_enable(&device->dev, false); | 381 | intel_hid_set_enable(&device->dev, false); |
381 | intel_button_array_enable(&device->dev, false); | 382 | intel_button_array_enable(&device->dev, false); |
diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c index b703d6f5b099..c13780b8dabb 100644 --- a/drivers/platform/x86/intel-vbtn.c +++ b/drivers/platform/x86/intel-vbtn.c | |||
@@ -7,6 +7,7 @@ | |||
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/acpi.h> | 9 | #include <linux/acpi.h> |
10 | #include <linux/dmi.h> | ||
10 | #include <linux/input.h> | 11 | #include <linux/input.h> |
11 | #include <linux/input/sparse-keymap.h> | 12 | #include <linux/input/sparse-keymap.h> |
12 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
@@ -97,9 +98,35 @@ out_unknown: | |||
97 | dev_dbg(&device->dev, "unknown event index 0x%x\n", event); | 98 | dev_dbg(&device->dev, "unknown event index 0x%x\n", event); |
98 | } | 99 | } |
99 | 100 | ||
100 | static int intel_vbtn_probe(struct platform_device *device) | 101 | static void detect_tablet_mode(struct platform_device *device) |
101 | { | 102 | { |
103 | const char *chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE); | ||
104 | struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev); | ||
105 | acpi_handle handle = ACPI_HANDLE(&device->dev); | ||
102 | struct acpi_buffer vgbs_output = { ACPI_ALLOCATE_BUFFER, NULL }; | 106 | struct acpi_buffer vgbs_output = { ACPI_ALLOCATE_BUFFER, NULL }; |
107 | union acpi_object *obj; | ||
108 | acpi_status status; | ||
109 | int m; | ||
110 | |||
111 | if (!(chassis_type && strcmp(chassis_type, "31") == 0)) | ||
112 | goto out; | ||
113 | |||
114 | status = acpi_evaluate_object(handle, "VGBS", NULL, &vgbs_output); | ||
115 | if (ACPI_FAILURE(status)) | ||
116 | goto out; | ||
117 | |||
118 | obj = vgbs_output.pointer; | ||
119 | if (!(obj && obj->type == ACPI_TYPE_INTEGER)) | ||
120 | goto out; | ||
121 | |||
122 | m = !(obj->integer.value & TABLET_MODE_FLAG); | ||
123 | input_report_switch(priv->input_dev, SW_TABLET_MODE, m); | ||
124 | out: | ||
125 | kfree(vgbs_output.pointer); | ||
126 | } | ||
127 | |||
128 | static int intel_vbtn_probe(struct platform_device *device) | ||
129 | { | ||
103 | acpi_handle handle = ACPI_HANDLE(&device->dev); | 130 | acpi_handle handle = ACPI_HANDLE(&device->dev); |
104 | struct intel_vbtn_priv *priv; | 131 | struct intel_vbtn_priv *priv; |
105 | acpi_status status; | 132 | acpi_status status; |
@@ -122,22 +149,7 @@ static int intel_vbtn_probe(struct platform_device *device) | |||
122 | return err; | 149 | return err; |
123 | } | 150 | } |
124 | 151 | ||
125 | /* | 152 | detect_tablet_mode(device); |
126 | * VGBS being present and returning something means we have | ||
127 | * a tablet mode switch. | ||
128 | */ | ||
129 | status = acpi_evaluate_object(handle, "VGBS", NULL, &vgbs_output); | ||
130 | if (ACPI_SUCCESS(status)) { | ||
131 | union acpi_object *obj = vgbs_output.pointer; | ||
132 | |||
133 | if (obj && obj->type == ACPI_TYPE_INTEGER) { | ||
134 | int m = !(obj->integer.value & TABLET_MODE_FLAG); | ||
135 | |||
136 | input_report_switch(priv->input_dev, SW_TABLET_MODE, m); | ||
137 | } | ||
138 | } | ||
139 | |||
140 | kfree(vgbs_output.pointer); | ||
141 | 153 | ||
142 | status = acpi_install_notify_handler(handle, | 154 | status = acpi_install_notify_handler(handle, |
143 | ACPI_DEVICE_NOTIFY, | 155 | ACPI_DEVICE_NOTIFY, |
@@ -154,6 +166,7 @@ static int intel_vbtn_remove(struct platform_device *device) | |||
154 | { | 166 | { |
155 | acpi_handle handle = ACPI_HANDLE(&device->dev); | 167 | acpi_handle handle = ACPI_HANDLE(&device->dev); |
156 | 168 | ||
169 | device_init_wakeup(&device->dev, false); | ||
157 | acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler); | 170 | acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler); |
158 | 171 | ||
159 | /* | 172 | /* |
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c index c0c8945603cb..8796211ef24a 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/x86/wmi.c | |||
@@ -945,7 +945,7 @@ static int wmi_dev_probe(struct device *dev) | |||
945 | wblock->char_dev.mode = 0444; | 945 | wblock->char_dev.mode = 0444; |
946 | ret = misc_register(&wblock->char_dev); | 946 | ret = misc_register(&wblock->char_dev); |
947 | if (ret) { | 947 | if (ret) { |
948 | dev_warn(dev, "failed to register char dev: %d", ret); | 948 | dev_warn(dev, "failed to register char dev: %d\n", ret); |
949 | ret = -ENOMEM; | 949 | ret = -ENOMEM; |
950 | goto probe_misc_failure; | 950 | goto probe_misc_failure; |
951 | } | 951 | } |
@@ -1048,7 +1048,7 @@ static int wmi_create_device(struct device *wmi_bus_dev, | |||
1048 | 1048 | ||
1049 | if (result) { | 1049 | if (result) { |
1050 | dev_warn(wmi_bus_dev, | 1050 | dev_warn(wmi_bus_dev, |
1051 | "%s data block query control method not found", | 1051 | "%s data block query control method not found\n", |
1052 | method); | 1052 | method); |
1053 | return result; | 1053 | return result; |
1054 | } | 1054 | } |
@@ -1198,7 +1198,7 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device) | |||
1198 | 1198 | ||
1199 | retval = device_add(&wblock->dev.dev); | 1199 | retval = device_add(&wblock->dev.dev); |
1200 | if (retval) { | 1200 | if (retval) { |
1201 | dev_err(wmi_bus_dev, "failed to register %pULL\n", | 1201 | dev_err(wmi_bus_dev, "failed to register %pUL\n", |
1202 | wblock->gblock.guid); | 1202 | wblock->gblock.guid); |
1203 | if (debug_event) | 1203 | if (debug_event) |
1204 | wmi_method_enable(wblock, 0); | 1204 | wmi_method_enable(wblock, 0); |
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index dd4708c58480..1fc0c0811da4 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c | |||
@@ -4310,7 +4310,7 @@ static int _regulator_resume_early(struct device *dev, void *data) | |||
4310 | 4310 | ||
4311 | rstate = regulator_get_suspend_state(rdev, *state); | 4311 | rstate = regulator_get_suspend_state(rdev, *state); |
4312 | if (rstate == NULL) | 4312 | if (rstate == NULL) |
4313 | return -EINVAL; | 4313 | return 0; |
4314 | 4314 | ||
4315 | mutex_lock(&rdev->mutex); | 4315 | mutex_lock(&rdev->mutex); |
4316 | 4316 | ||
diff --git a/drivers/regulator/stm32-vrefbuf.c b/drivers/regulator/stm32-vrefbuf.c index 72c8b3e1022b..e0a9c445ed67 100644 --- a/drivers/regulator/stm32-vrefbuf.c +++ b/drivers/regulator/stm32-vrefbuf.c | |||
@@ -51,7 +51,7 @@ static int stm32_vrefbuf_enable(struct regulator_dev *rdev) | |||
51 | * arbitrary timeout. | 51 | * arbitrary timeout. |
52 | */ | 52 | */ |
53 | ret = readl_poll_timeout(priv->base + STM32_VREFBUF_CSR, val, | 53 | ret = readl_poll_timeout(priv->base + STM32_VREFBUF_CSR, val, |
54 | !(val & STM32_VRR), 650, 10000); | 54 | val & STM32_VRR, 650, 10000); |
55 | if (ret) { | 55 | if (ret) { |
56 | dev_err(&rdev->dev, "stm32 vrefbuf timed out!\n"); | 56 | dev_err(&rdev->dev, "stm32 vrefbuf timed out!\n"); |
57 | val = readl_relaxed(priv->base + STM32_VREFBUF_CSR); | 57 | val = readl_relaxed(priv->base + STM32_VREFBUF_CSR); |
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index a7c15f0085e2..ecef8e73d40b 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
@@ -2581,8 +2581,6 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr) | |||
2581 | case DASD_CQR_QUEUED: | 2581 | case DASD_CQR_QUEUED: |
2582 | /* request was not started - just set to cleared */ | 2582 | /* request was not started - just set to cleared */ |
2583 | cqr->status = DASD_CQR_CLEARED; | 2583 | cqr->status = DASD_CQR_CLEARED; |
2584 | if (cqr->callback_data == DASD_SLEEPON_START_TAG) | ||
2585 | cqr->callback_data = DASD_SLEEPON_END_TAG; | ||
2586 | break; | 2584 | break; |
2587 | case DASD_CQR_IN_IO: | 2585 | case DASD_CQR_IN_IO: |
2588 | /* request in IO - terminate IO and release again */ | 2586 | /* request in IO - terminate IO and release again */ |
@@ -3902,9 +3900,12 @@ static int dasd_generic_requeue_all_requests(struct dasd_device *device) | |||
3902 | wait_event(dasd_flush_wq, | 3900 | wait_event(dasd_flush_wq, |
3903 | (cqr->status != DASD_CQR_CLEAR_PENDING)); | 3901 | (cqr->status != DASD_CQR_CLEAR_PENDING)); |
3904 | 3902 | ||
3905 | /* mark sleepon requests as ended */ | 3903 | /* |
3906 | if (cqr->callback_data == DASD_SLEEPON_START_TAG) | 3904 | * requeue requests to blocklayer will only work |
3907 | cqr->callback_data = DASD_SLEEPON_END_TAG; | 3905 | * for block device requests |
3906 | */ | ||
3907 | if (_dasd_requeue_request(cqr)) | ||
3908 | continue; | ||
3908 | 3909 | ||
3909 | /* remove requests from device and block queue */ | 3910 | /* remove requests from device and block queue */ |
3910 | list_del_init(&cqr->devlist); | 3911 | list_del_init(&cqr->devlist); |
@@ -3917,13 +3918,6 @@ static int dasd_generic_requeue_all_requests(struct dasd_device *device) | |||
3917 | cqr = refers; | 3918 | cqr = refers; |
3918 | } | 3919 | } |
3919 | 3920 | ||
3920 | /* | ||
3921 | * requeue requests to blocklayer will only work | ||
3922 | * for block device requests | ||
3923 | */ | ||
3924 | if (_dasd_requeue_request(cqr)) | ||
3925 | continue; | ||
3926 | |||
3927 | if (cqr->block) | 3921 | if (cqr->block) |
3928 | list_del_init(&cqr->blocklist); | 3922 | list_del_init(&cqr->blocklist); |
3929 | cqr->block->base->discipline->free_cp( | 3923 | cqr->block->base->discipline->free_cp( |
@@ -3940,8 +3934,7 @@ static int dasd_generic_requeue_all_requests(struct dasd_device *device) | |||
3940 | list_splice_tail(&requeue_queue, &device->ccw_queue); | 3934 | list_splice_tail(&requeue_queue, &device->ccw_queue); |
3941 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); | 3935 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); |
3942 | } | 3936 | } |
3943 | /* wake up generic waitqueue for eventually ended sleepon requests */ | 3937 | dasd_schedule_device_bh(device); |
3944 | wake_up(&generic_waitq); | ||
3945 | return rc; | 3938 | return rc; |
3946 | } | 3939 | } |
3947 | 3940 | ||
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 1319122e9d12..9169af7dbb43 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c | |||
@@ -795,6 +795,7 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event) | |||
795 | 795 | ||
796 | ccw_device_set_timeout(cdev, 0); | 796 | ccw_device_set_timeout(cdev, 0); |
797 | cdev->private->iretry = 255; | 797 | cdev->private->iretry = 255; |
798 | cdev->private->async_kill_io_rc = -ETIMEDOUT; | ||
798 | ret = ccw_device_cancel_halt_clear(cdev); | 799 | ret = ccw_device_cancel_halt_clear(cdev); |
799 | if (ret == -EBUSY) { | 800 | if (ret == -EBUSY) { |
800 | ccw_device_set_timeout(cdev, 3*HZ); | 801 | ccw_device_set_timeout(cdev, 3*HZ); |
@@ -871,7 +872,7 @@ ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event) | |||
871 | /* OK, i/o is dead now. Call interrupt handler. */ | 872 | /* OK, i/o is dead now. Call interrupt handler. */ |
872 | if (cdev->handler) | 873 | if (cdev->handler) |
873 | cdev->handler(cdev, cdev->private->intparm, | 874 | cdev->handler(cdev, cdev->private->intparm, |
874 | ERR_PTR(-EIO)); | 875 | ERR_PTR(cdev->private->async_kill_io_rc)); |
875 | } | 876 | } |
876 | 877 | ||
877 | static void | 878 | static void |
@@ -888,14 +889,16 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event) | |||
888 | ccw_device_online_verify(cdev, 0); | 889 | ccw_device_online_verify(cdev, 0); |
889 | if (cdev->handler) | 890 | if (cdev->handler) |
890 | cdev->handler(cdev, cdev->private->intparm, | 891 | cdev->handler(cdev, cdev->private->intparm, |
891 | ERR_PTR(-EIO)); | 892 | ERR_PTR(cdev->private->async_kill_io_rc)); |
892 | } | 893 | } |
893 | 894 | ||
894 | void ccw_device_kill_io(struct ccw_device *cdev) | 895 | void ccw_device_kill_io(struct ccw_device *cdev) |
895 | { | 896 | { |
896 | int ret; | 897 | int ret; |
897 | 898 | ||
899 | ccw_device_set_timeout(cdev, 0); | ||
898 | cdev->private->iretry = 255; | 900 | cdev->private->iretry = 255; |
901 | cdev->private->async_kill_io_rc = -EIO; | ||
899 | ret = ccw_device_cancel_halt_clear(cdev); | 902 | ret = ccw_device_cancel_halt_clear(cdev); |
900 | if (ret == -EBUSY) { | 903 | if (ret == -EBUSY) { |
901 | ccw_device_set_timeout(cdev, 3*HZ); | 904 | ccw_device_set_timeout(cdev, 3*HZ); |
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index 1caf6a398760..75ce12a24dc2 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c | |||
@@ -159,7 +159,7 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm) | |||
159 | } | 159 | } |
160 | 160 | ||
161 | /** | 161 | /** |
162 | * ccw_device_start_key() - start a s390 channel program with key | 162 | * ccw_device_start_timeout_key() - start a s390 channel program with timeout and key |
163 | * @cdev: target ccw device | 163 | * @cdev: target ccw device |
164 | * @cpa: logical start address of channel program | 164 | * @cpa: logical start address of channel program |
165 | * @intparm: user specific interruption parameter; will be presented back to | 165 | * @intparm: user specific interruption parameter; will be presented back to |
@@ -170,10 +170,15 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm) | |||
170 | * @key: storage key to be used for the I/O | 170 | * @key: storage key to be used for the I/O |
171 | * @flags: additional flags; defines the action to be performed for I/O | 171 | * @flags: additional flags; defines the action to be performed for I/O |
172 | * processing. | 172 | * processing. |
173 | * @expires: timeout value in jiffies | ||
173 | * | 174 | * |
174 | * Start a S/390 channel program. When the interrupt arrives, the | 175 | * Start a S/390 channel program. When the interrupt arrives, the |
175 | * IRQ handler is called, either immediately, delayed (dev-end missing, | 176 | * IRQ handler is called, either immediately, delayed (dev-end missing, |
176 | * or sense required) or never (no IRQ handler registered). | 177 | * or sense required) or never (no IRQ handler registered). |
178 | * This function notifies the device driver if the channel program has not | ||
179 | * completed during the time specified by @expires. If a timeout occurs, the | ||
180 | * channel program is terminated via xsch, hsch or csch, and the device's | ||
181 | * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT). | ||
177 | * Returns: | 182 | * Returns: |
178 | * %0, if the operation was successful; | 183 | * %0, if the operation was successful; |
179 | * -%EBUSY, if the device is busy, or status pending; | 184 | * -%EBUSY, if the device is busy, or status pending; |
@@ -182,9 +187,9 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm) | |||
182 | * Context: | 187 | * Context: |
183 | * Interrupts disabled, ccw device lock held | 188 | * Interrupts disabled, ccw device lock held |
184 | */ | 189 | */ |
185 | int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, | 190 | int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa, |
186 | unsigned long intparm, __u8 lpm, __u8 key, | 191 | unsigned long intparm, __u8 lpm, __u8 key, |
187 | unsigned long flags) | 192 | unsigned long flags, int expires) |
188 | { | 193 | { |
189 | struct subchannel *sch; | 194 | struct subchannel *sch; |
190 | int ret; | 195 | int ret; |
@@ -224,6 +229,8 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, | |||
224 | switch (ret) { | 229 | switch (ret) { |
225 | case 0: | 230 | case 0: |
226 | cdev->private->intparm = intparm; | 231 | cdev->private->intparm = intparm; |
232 | if (expires) | ||
233 | ccw_device_set_timeout(cdev, expires); | ||
227 | break; | 234 | break; |
228 | case -EACCES: | 235 | case -EACCES: |
229 | case -ENODEV: | 236 | case -ENODEV: |
@@ -234,7 +241,7 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, | |||
234 | } | 241 | } |
235 | 242 | ||
236 | /** | 243 | /** |
237 | * ccw_device_start_timeout_key() - start a s390 channel program with timeout and key | 244 | * ccw_device_start_key() - start a s390 channel program with key |
238 | * @cdev: target ccw device | 245 | * @cdev: target ccw device |
239 | * @cpa: logical start address of channel program | 246 | * @cpa: logical start address of channel program |
240 | * @intparm: user specific interruption parameter; will be presented back to | 247 | * @intparm: user specific interruption parameter; will be presented back to |
@@ -245,15 +252,10 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, | |||
245 | * @key: storage key to be used for the I/O | 252 | * @key: storage key to be used for the I/O |
246 | * @flags: additional flags; defines the action to be performed for I/O | 253 | * @flags: additional flags; defines the action to be performed for I/O |
247 | * processing. | 254 | * processing. |
248 | * @expires: timeout value in jiffies | ||
249 | * | 255 | * |
250 | * Start a S/390 channel program. When the interrupt arrives, the | 256 | * Start a S/390 channel program. When the interrupt arrives, the |
251 | * IRQ handler is called, either immediately, delayed (dev-end missing, | 257 | * IRQ handler is called, either immediately, delayed (dev-end missing, |
252 | * or sense required) or never (no IRQ handler registered). | 258 | * or sense required) or never (no IRQ handler registered). |
253 | * This function notifies the device driver if the channel program has not | ||
254 | * completed during the time specified by @expires. If a timeout occurs, the | ||
255 | * channel program is terminated via xsch, hsch or csch, and the device's | ||
256 | * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT). | ||
257 | * Returns: | 259 | * Returns: |
258 | * %0, if the operation was successful; | 260 | * %0, if the operation was successful; |
259 | * -%EBUSY, if the device is busy, or status pending; | 261 | * -%EBUSY, if the device is busy, or status pending; |
@@ -262,19 +264,12 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, | |||
262 | * Context: | 264 | * Context: |
263 | * Interrupts disabled, ccw device lock held | 265 | * Interrupts disabled, ccw device lock held |
264 | */ | 266 | */ |
265 | int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa, | 267 | int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, |
266 | unsigned long intparm, __u8 lpm, __u8 key, | 268 | unsigned long intparm, __u8 lpm, __u8 key, |
267 | unsigned long flags, int expires) | 269 | unsigned long flags) |
268 | { | 270 | { |
269 | int ret; | 271 | return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm, key, |
270 | 272 | flags, 0); | |
271 | if (!cdev) | ||
272 | return -ENODEV; | ||
273 | ccw_device_set_timeout(cdev, expires); | ||
274 | ret = ccw_device_start_key(cdev, cpa, intparm, lpm, key, flags); | ||
275 | if (ret != 0) | ||
276 | ccw_device_set_timeout(cdev, 0); | ||
277 | return ret; | ||
278 | } | 273 | } |
279 | 274 | ||
280 | /** | 275 | /** |
@@ -489,18 +484,20 @@ void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id) | |||
489 | EXPORT_SYMBOL(ccw_device_get_id); | 484 | EXPORT_SYMBOL(ccw_device_get_id); |
490 | 485 | ||
491 | /** | 486 | /** |
492 | * ccw_device_tm_start_key() - perform start function | 487 | * ccw_device_tm_start_timeout_key() - perform start function |
493 | * @cdev: ccw device on which to perform the start function | 488 | * @cdev: ccw device on which to perform the start function |
494 | * @tcw: transport-command word to be started | 489 | * @tcw: transport-command word to be started |
495 | * @intparm: user defined parameter to be passed to the interrupt handler | 490 | * @intparm: user defined parameter to be passed to the interrupt handler |
496 | * @lpm: mask of paths to use | 491 | * @lpm: mask of paths to use |
497 | * @key: storage key to use for storage access | 492 | * @key: storage key to use for storage access |
493 | * @expires: time span in jiffies after which to abort request | ||
498 | * | 494 | * |
499 | * Start the tcw on the given ccw device. Return zero on success, non-zero | 495 | * Start the tcw on the given ccw device. Return zero on success, non-zero |
500 | * otherwise. | 496 | * otherwise. |
501 | */ | 497 | */ |
502 | int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw, | 498 | int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw, |
503 | unsigned long intparm, u8 lpm, u8 key) | 499 | unsigned long intparm, u8 lpm, u8 key, |
500 | int expires) | ||
504 | { | 501 | { |
505 | struct subchannel *sch; | 502 | struct subchannel *sch; |
506 | int rc; | 503 | int rc; |
@@ -527,37 +524,32 @@ int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw, | |||
527 | return -EACCES; | 524 | return -EACCES; |
528 | } | 525 | } |
529 | rc = cio_tm_start_key(sch, tcw, lpm, key); | 526 | rc = cio_tm_start_key(sch, tcw, lpm, key); |
530 | if (rc == 0) | 527 | if (rc == 0) { |
531 | cdev->private->intparm = intparm; | 528 | cdev->private->intparm = intparm; |
529 | if (expires) | ||
530 | ccw_device_set_timeout(cdev, expires); | ||
531 | } | ||
532 | return rc; | 532 | return rc; |
533 | } | 533 | } |
534 | EXPORT_SYMBOL(ccw_device_tm_start_key); | 534 | EXPORT_SYMBOL(ccw_device_tm_start_timeout_key); |
535 | 535 | ||
536 | /** | 536 | /** |
537 | * ccw_device_tm_start_timeout_key() - perform start function | 537 | * ccw_device_tm_start_key() - perform start function |
538 | * @cdev: ccw device on which to perform the start function | 538 | * @cdev: ccw device on which to perform the start function |
539 | * @tcw: transport-command word to be started | 539 | * @tcw: transport-command word to be started |
540 | * @intparm: user defined parameter to be passed to the interrupt handler | 540 | * @intparm: user defined parameter to be passed to the interrupt handler |
541 | * @lpm: mask of paths to use | 541 | * @lpm: mask of paths to use |
542 | * @key: storage key to use for storage access | 542 | * @key: storage key to use for storage access |
543 | * @expires: time span in jiffies after which to abort request | ||
544 | * | 543 | * |
545 | * Start the tcw on the given ccw device. Return zero on success, non-zero | 544 | * Start the tcw on the given ccw device. Return zero on success, non-zero |
546 | * otherwise. | 545 | * otherwise. |
547 | */ | 546 | */ |
548 | int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw, | 547 | int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw, |
549 | unsigned long intparm, u8 lpm, u8 key, | 548 | unsigned long intparm, u8 lpm, u8 key) |
550 | int expires) | ||
551 | { | 549 | { |
552 | int ret; | 550 | return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm, key, 0); |
553 | |||
554 | ccw_device_set_timeout(cdev, expires); | ||
555 | ret = ccw_device_tm_start_key(cdev, tcw, intparm, lpm, key); | ||
556 | if (ret != 0) | ||
557 | ccw_device_set_timeout(cdev, 0); | ||
558 | return ret; | ||
559 | } | 551 | } |
560 | EXPORT_SYMBOL(ccw_device_tm_start_timeout_key); | 552 | EXPORT_SYMBOL(ccw_device_tm_start_key); |
561 | 553 | ||
562 | /** | 554 | /** |
563 | * ccw_device_tm_start() - perform start function | 555 | * ccw_device_tm_start() - perform start function |
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h index af571d8d6925..90e4e3a7841b 100644 --- a/drivers/s390/cio/io_sch.h +++ b/drivers/s390/cio/io_sch.h | |||
@@ -157,6 +157,7 @@ struct ccw_device_private { | |||
157 | unsigned long intparm; /* user interruption parameter */ | 157 | unsigned long intparm; /* user interruption parameter */ |
158 | struct qdio_irq *qdio_data; | 158 | struct qdio_irq *qdio_data; |
159 | struct irb irb; /* device status */ | 159 | struct irb irb; /* device status */ |
160 | int async_kill_io_rc; | ||
160 | struct senseid senseid; /* SenseID info */ | 161 | struct senseid senseid; /* SenseID info */ |
161 | struct pgid pgid[8]; /* path group IDs per chpid*/ | 162 | struct pgid pgid[8]; /* path group IDs per chpid*/ |
162 | struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */ | 163 | struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */ |
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index ca72f3311004..c8b308cfabf1 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
@@ -2134,24 +2134,25 @@ int qeth_send_control_data(struct qeth_card *card, int len, | |||
2134 | } | 2134 | } |
2135 | reply->callback = reply_cb; | 2135 | reply->callback = reply_cb; |
2136 | reply->param = reply_param; | 2136 | reply->param = reply_param; |
2137 | if (card->state == CARD_STATE_DOWN) | 2137 | |
2138 | reply->seqno = QETH_IDX_COMMAND_SEQNO; | ||
2139 | else | ||
2140 | reply->seqno = card->seqno.ipa++; | ||
2141 | init_waitqueue_head(&reply->wait_q); | 2138 | init_waitqueue_head(&reply->wait_q); |
2142 | spin_lock_irqsave(&card->lock, flags); | ||
2143 | list_add_tail(&reply->list, &card->cmd_waiter_list); | ||
2144 | spin_unlock_irqrestore(&card->lock, flags); | ||
2145 | 2139 | ||
2146 | while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ; | 2140 | while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ; |
2147 | qeth_prepare_control_data(card, len, iob); | ||
2148 | 2141 | ||
2149 | if (IS_IPA(iob->data)) { | 2142 | if (IS_IPA(iob->data)) { |
2150 | cmd = __ipa_cmd(iob); | 2143 | cmd = __ipa_cmd(iob); |
2144 | cmd->hdr.seqno = card->seqno.ipa++; | ||
2145 | reply->seqno = cmd->hdr.seqno; | ||
2151 | event_timeout = QETH_IPA_TIMEOUT; | 2146 | event_timeout = QETH_IPA_TIMEOUT; |
2152 | } else { | 2147 | } else { |
2148 | reply->seqno = QETH_IDX_COMMAND_SEQNO; | ||
2153 | event_timeout = QETH_TIMEOUT; | 2149 | event_timeout = QETH_TIMEOUT; |
2154 | } | 2150 | } |
2151 | qeth_prepare_control_data(card, len, iob); | ||
2152 | |||
2153 | spin_lock_irqsave(&card->lock, flags); | ||
2154 | list_add_tail(&reply->list, &card->cmd_waiter_list); | ||
2155 | spin_unlock_irqrestore(&card->lock, flags); | ||
2155 | 2156 | ||
2156 | timeout = jiffies + event_timeout; | 2157 | timeout = jiffies + event_timeout; |
2157 | 2158 | ||
@@ -2933,7 +2934,7 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card, | |||
2933 | memset(cmd, 0, sizeof(struct qeth_ipa_cmd)); | 2934 | memset(cmd, 0, sizeof(struct qeth_ipa_cmd)); |
2934 | cmd->hdr.command = command; | 2935 | cmd->hdr.command = command; |
2935 | cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST; | 2936 | cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST; |
2936 | cmd->hdr.seqno = card->seqno.ipa; | 2937 | /* cmd->hdr.seqno is set by qeth_send_control_data() */ |
2937 | cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type); | 2938 | cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type); |
2938 | cmd->hdr.rel_adapter_no = (__u8) card->info.portno; | 2939 | cmd->hdr.rel_adapter_no = (__u8) card->info.portno; |
2939 | if (card->options.layer2) | 2940 | if (card->options.layer2) |
@@ -3898,10 +3899,12 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags); | |||
3898 | int qeth_get_elements_no(struct qeth_card *card, | 3899 | int qeth_get_elements_no(struct qeth_card *card, |
3899 | struct sk_buff *skb, int extra_elems, int data_offset) | 3900 | struct sk_buff *skb, int extra_elems, int data_offset) |
3900 | { | 3901 | { |
3901 | int elements = qeth_get_elements_for_range( | 3902 | addr_t end = (addr_t)skb->data + skb_headlen(skb); |
3902 | (addr_t)skb->data + data_offset, | 3903 | int elements = qeth_get_elements_for_frags(skb); |
3903 | (addr_t)skb->data + skb_headlen(skb)) + | 3904 | addr_t start = (addr_t)skb->data + data_offset; |
3904 | qeth_get_elements_for_frags(skb); | 3905 | |
3906 | if (start != end) | ||
3907 | elements += qeth_get_elements_for_range(start, end); | ||
3905 | 3908 | ||
3906 | if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { | 3909 | if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { |
3907 | QETH_DBF_MESSAGE(2, "Invalid size of IP packet " | 3910 | QETH_DBF_MESSAGE(2, "Invalid size of IP packet " |
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h index bdd45f4dcace..498fe9af2cdb 100644 --- a/drivers/s390/net/qeth_l3.h +++ b/drivers/s390/net/qeth_l3.h | |||
@@ -40,8 +40,40 @@ struct qeth_ipaddr { | |||
40 | unsigned int pfxlen; | 40 | unsigned int pfxlen; |
41 | } a6; | 41 | } a6; |
42 | } u; | 42 | } u; |
43 | |||
44 | }; | 43 | }; |
44 | |||
45 | static inline bool qeth_l3_addr_match_ip(struct qeth_ipaddr *a1, | ||
46 | struct qeth_ipaddr *a2) | ||
47 | { | ||
48 | if (a1->proto != a2->proto) | ||
49 | return false; | ||
50 | if (a1->proto == QETH_PROT_IPV6) | ||
51 | return ipv6_addr_equal(&a1->u.a6.addr, &a2->u.a6.addr); | ||
52 | return a1->u.a4.addr == a2->u.a4.addr; | ||
53 | } | ||
54 | |||
55 | static inline bool qeth_l3_addr_match_all(struct qeth_ipaddr *a1, | ||
56 | struct qeth_ipaddr *a2) | ||
57 | { | ||
58 | /* Assumes that the pair was obtained via qeth_l3_addr_find_by_ip(), | ||
59 | * so 'proto' and 'addr' match for sure. | ||
60 | * | ||
61 | * For ucast: | ||
62 | * - 'mac' is always 0. | ||
63 | * - 'mask'/'pfxlen' for RXIP/VIPA is always 0. For NORMAL, matching | ||
64 | * values are required to avoid mixups in takeover eligibility. | ||
65 | * | ||
66 | * For mcast, | ||
67 | * - 'mac' is mapped from the IP, and thus always matches. | ||
68 | * - 'mask'/'pfxlen' is always 0. | ||
69 | */ | ||
70 | if (a1->type != a2->type) | ||
71 | return false; | ||
72 | if (a1->proto == QETH_PROT_IPV6) | ||
73 | return a1->u.a6.pfxlen == a2->u.a6.pfxlen; | ||
74 | return a1->u.a4.mask == a2->u.a4.mask; | ||
75 | } | ||
76 | |||
45 | static inline u64 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr) | 77 | static inline u64 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr) |
46 | { | 78 | { |
47 | u64 ret = 0; | 79 | u64 ret = 0; |
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index b0c888e86cd4..962a04b68dd2 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
@@ -67,6 +67,24 @@ void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr, | |||
67 | qeth_l3_ipaddr6_to_string(addr, buf); | 67 | qeth_l3_ipaddr6_to_string(addr, buf); |
68 | } | 68 | } |
69 | 69 | ||
70 | static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card, | ||
71 | struct qeth_ipaddr *query) | ||
72 | { | ||
73 | u64 key = qeth_l3_ipaddr_hash(query); | ||
74 | struct qeth_ipaddr *addr; | ||
75 | |||
76 | if (query->is_multicast) { | ||
77 | hash_for_each_possible(card->ip_mc_htable, addr, hnode, key) | ||
78 | if (qeth_l3_addr_match_ip(addr, query)) | ||
79 | return addr; | ||
80 | } else { | ||
81 | hash_for_each_possible(card->ip_htable, addr, hnode, key) | ||
82 | if (qeth_l3_addr_match_ip(addr, query)) | ||
83 | return addr; | ||
84 | } | ||
85 | return NULL; | ||
86 | } | ||
87 | |||
70 | static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len) | 88 | static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len) |
71 | { | 89 | { |
72 | int i, j; | 90 | int i, j; |
@@ -120,34 +138,6 @@ static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card, | |||
120 | return rc; | 138 | return rc; |
121 | } | 139 | } |
122 | 140 | ||
123 | inline int | ||
124 | qeth_l3_ipaddrs_is_equal(struct qeth_ipaddr *addr1, struct qeth_ipaddr *addr2) | ||
125 | { | ||
126 | return addr1->proto == addr2->proto && | ||
127 | !memcmp(&addr1->u, &addr2->u, sizeof(addr1->u)) && | ||
128 | ether_addr_equal_64bits(addr1->mac, addr2->mac); | ||
129 | } | ||
130 | |||
131 | static struct qeth_ipaddr * | ||
132 | qeth_l3_ip_from_hash(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) | ||
133 | { | ||
134 | struct qeth_ipaddr *addr; | ||
135 | |||
136 | if (tmp_addr->is_multicast) { | ||
137 | hash_for_each_possible(card->ip_mc_htable, addr, | ||
138 | hnode, qeth_l3_ipaddr_hash(tmp_addr)) | ||
139 | if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr)) | ||
140 | return addr; | ||
141 | } else { | ||
142 | hash_for_each_possible(card->ip_htable, addr, | ||
143 | hnode, qeth_l3_ipaddr_hash(tmp_addr)) | ||
144 | if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr)) | ||
145 | return addr; | ||
146 | } | ||
147 | |||
148 | return NULL; | ||
149 | } | ||
150 | |||
151 | int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) | 141 | int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) |
152 | { | 142 | { |
153 | int rc = 0; | 143 | int rc = 0; |
@@ -162,23 +152,18 @@ int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) | |||
162 | QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8); | 152 | QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8); |
163 | } | 153 | } |
164 | 154 | ||
165 | addr = qeth_l3_ip_from_hash(card, tmp_addr); | 155 | addr = qeth_l3_find_addr_by_ip(card, tmp_addr); |
166 | if (!addr) | 156 | if (!addr || !qeth_l3_addr_match_all(addr, tmp_addr)) |
167 | return -ENOENT; | 157 | return -ENOENT; |
168 | 158 | ||
169 | addr->ref_counter--; | 159 | addr->ref_counter--; |
170 | if (addr->ref_counter > 0 && (addr->type == QETH_IP_TYPE_NORMAL || | 160 | if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0) |
171 | addr->type == QETH_IP_TYPE_RXIP)) | ||
172 | return rc; | 161 | return rc; |
173 | if (addr->in_progress) | 162 | if (addr->in_progress) |
174 | return -EINPROGRESS; | 163 | return -EINPROGRESS; |
175 | 164 | ||
176 | if (!qeth_card_hw_is_reachable(card)) { | 165 | if (qeth_card_hw_is_reachable(card)) |
177 | addr->disp_flag = QETH_DISP_ADDR_DELETE; | 166 | rc = qeth_l3_deregister_addr_entry(card, addr); |
178 | return 0; | ||
179 | } | ||
180 | |||
181 | rc = qeth_l3_deregister_addr_entry(card, addr); | ||
182 | 167 | ||
183 | hash_del(&addr->hnode); | 168 | hash_del(&addr->hnode); |
184 | kfree(addr); | 169 | kfree(addr); |
@@ -190,6 +175,7 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) | |||
190 | { | 175 | { |
191 | int rc = 0; | 176 | int rc = 0; |
192 | struct qeth_ipaddr *addr; | 177 | struct qeth_ipaddr *addr; |
178 | char buf[40]; | ||
193 | 179 | ||
194 | QETH_CARD_TEXT(card, 4, "addip"); | 180 | QETH_CARD_TEXT(card, 4, "addip"); |
195 | 181 | ||
@@ -200,8 +186,20 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) | |||
200 | QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8); | 186 | QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8); |
201 | } | 187 | } |
202 | 188 | ||
203 | addr = qeth_l3_ip_from_hash(card, tmp_addr); | 189 | addr = qeth_l3_find_addr_by_ip(card, tmp_addr); |
204 | if (!addr) { | 190 | if (addr) { |
191 | if (tmp_addr->type != QETH_IP_TYPE_NORMAL) | ||
192 | return -EADDRINUSE; | ||
193 | if (qeth_l3_addr_match_all(addr, tmp_addr)) { | ||
194 | addr->ref_counter++; | ||
195 | return 0; | ||
196 | } | ||
197 | qeth_l3_ipaddr_to_string(tmp_addr->proto, (u8 *)&tmp_addr->u, | ||
198 | buf); | ||
199 | dev_warn(&card->gdev->dev, | ||
200 | "Registering IP address %s failed\n", buf); | ||
201 | return -EADDRINUSE; | ||
202 | } else { | ||
205 | addr = qeth_l3_get_addr_buffer(tmp_addr->proto); | 203 | addr = qeth_l3_get_addr_buffer(tmp_addr->proto); |
206 | if (!addr) | 204 | if (!addr) |
207 | return -ENOMEM; | 205 | return -ENOMEM; |
@@ -241,19 +239,15 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) | |||
241 | (rc == IPA_RC_LAN_OFFLINE)) { | 239 | (rc == IPA_RC_LAN_OFFLINE)) { |
242 | addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING; | 240 | addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING; |
243 | if (addr->ref_counter < 1) { | 241 | if (addr->ref_counter < 1) { |
244 | qeth_l3_delete_ip(card, addr); | 242 | qeth_l3_deregister_addr_entry(card, addr); |
243 | hash_del(&addr->hnode); | ||
245 | kfree(addr); | 244 | kfree(addr); |
246 | } | 245 | } |
247 | } else { | 246 | } else { |
248 | hash_del(&addr->hnode); | 247 | hash_del(&addr->hnode); |
249 | kfree(addr); | 248 | kfree(addr); |
250 | } | 249 | } |
251 | } else { | ||
252 | if (addr->type == QETH_IP_TYPE_NORMAL || | ||
253 | addr->type == QETH_IP_TYPE_RXIP) | ||
254 | addr->ref_counter++; | ||
255 | } | 250 | } |
256 | |||
257 | return rc; | 251 | return rc; |
258 | } | 252 | } |
259 | 253 | ||
@@ -321,11 +315,7 @@ static void qeth_l3_recover_ip(struct qeth_card *card) | |||
321 | spin_lock_bh(&card->ip_lock); | 315 | spin_lock_bh(&card->ip_lock); |
322 | 316 | ||
323 | hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) { | 317 | hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) { |
324 | if (addr->disp_flag == QETH_DISP_ADDR_DELETE) { | 318 | if (addr->disp_flag == QETH_DISP_ADDR_ADD) { |
325 | qeth_l3_deregister_addr_entry(card, addr); | ||
326 | hash_del(&addr->hnode); | ||
327 | kfree(addr); | ||
328 | } else if (addr->disp_flag == QETH_DISP_ADDR_ADD) { | ||
329 | if (addr->proto == QETH_PROT_IPV4) { | 319 | if (addr->proto == QETH_PROT_IPV4) { |
330 | addr->in_progress = 1; | 320 | addr->in_progress = 1; |
331 | spin_unlock_bh(&card->ip_lock); | 321 | spin_unlock_bh(&card->ip_lock); |
@@ -643,12 +633,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto, | |||
643 | return -ENOMEM; | 633 | return -ENOMEM; |
644 | 634 | ||
645 | spin_lock_bh(&card->ip_lock); | 635 | spin_lock_bh(&card->ip_lock); |
646 | 636 | rc = qeth_l3_add_ip(card, ipaddr); | |
647 | if (qeth_l3_ip_from_hash(card, ipaddr)) | ||
648 | rc = -EEXIST; | ||
649 | else | ||
650 | rc = qeth_l3_add_ip(card, ipaddr); | ||
651 | |||
652 | spin_unlock_bh(&card->ip_lock); | 637 | spin_unlock_bh(&card->ip_lock); |
653 | 638 | ||
654 | kfree(ipaddr); | 639 | kfree(ipaddr); |
@@ -713,12 +698,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto, | |||
713 | return -ENOMEM; | 698 | return -ENOMEM; |
714 | 699 | ||
715 | spin_lock_bh(&card->ip_lock); | 700 | spin_lock_bh(&card->ip_lock); |
716 | 701 | rc = qeth_l3_add_ip(card, ipaddr); | |
717 | if (qeth_l3_ip_from_hash(card, ipaddr)) | ||
718 | rc = -EEXIST; | ||
719 | else | ||
720 | rc = qeth_l3_add_ip(card, ipaddr); | ||
721 | |||
722 | spin_unlock_bh(&card->ip_lock); | 702 | spin_unlock_bh(&card->ip_lock); |
723 | 703 | ||
724 | kfree(ipaddr); | 704 | kfree(ipaddr); |
@@ -1239,8 +1219,9 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev) | |||
1239 | tmp->u.a4.addr = be32_to_cpu(im4->multiaddr); | 1219 | tmp->u.a4.addr = be32_to_cpu(im4->multiaddr); |
1240 | tmp->is_multicast = 1; | 1220 | tmp->is_multicast = 1; |
1241 | 1221 | ||
1242 | ipm = qeth_l3_ip_from_hash(card, tmp); | 1222 | ipm = qeth_l3_find_addr_by_ip(card, tmp); |
1243 | if (ipm) { | 1223 | if (ipm) { |
1224 | /* for mcast, by-IP match means full match */ | ||
1244 | ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING; | 1225 | ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING; |
1245 | } else { | 1226 | } else { |
1246 | ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); | 1227 | ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); |
@@ -1319,8 +1300,9 @@ static void qeth_l3_add_mc6_to_hash(struct qeth_card *card, | |||
1319 | sizeof(struct in6_addr)); | 1300 | sizeof(struct in6_addr)); |
1320 | tmp->is_multicast = 1; | 1301 | tmp->is_multicast = 1; |
1321 | 1302 | ||
1322 | ipm = qeth_l3_ip_from_hash(card, tmp); | 1303 | ipm = qeth_l3_find_addr_by_ip(card, tmp); |
1323 | if (ipm) { | 1304 | if (ipm) { |
1305 | /* for mcast, by-IP match means full match */ | ||
1324 | ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING; | 1306 | ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING; |
1325 | continue; | 1307 | continue; |
1326 | } | 1308 | } |
@@ -2450,11 +2432,12 @@ static void qeth_tso_fill_header(struct qeth_card *card, | |||
2450 | static int qeth_l3_get_elements_no_tso(struct qeth_card *card, | 2432 | static int qeth_l3_get_elements_no_tso(struct qeth_card *card, |
2451 | struct sk_buff *skb, int extra_elems) | 2433 | struct sk_buff *skb, int extra_elems) |
2452 | { | 2434 | { |
2453 | addr_t tcpdptr = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb); | 2435 | addr_t start = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb); |
2454 | int elements = qeth_get_elements_for_range( | 2436 | addr_t end = (addr_t)skb->data + skb_headlen(skb); |
2455 | tcpdptr, | 2437 | int elements = qeth_get_elements_for_frags(skb); |
2456 | (addr_t)skb->data + skb_headlen(skb)) + | 2438 | |
2457 | qeth_get_elements_for_frags(skb); | 2439 | if (start != end) |
2440 | elements += qeth_get_elements_for_range(start, end); | ||
2458 | 2441 | ||
2459 | if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { | 2442 | if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { |
2460 | QETH_DBF_MESSAGE(2, | 2443 | QETH_DBF_MESSAGE(2, |
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index 57bf43e34863..dd9464920456 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c | |||
@@ -328,8 +328,6 @@ static void scsi_host_dev_release(struct device *dev) | |||
328 | if (shost->work_q) | 328 | if (shost->work_q) |
329 | destroy_workqueue(shost->work_q); | 329 | destroy_workqueue(shost->work_q); |
330 | 330 | ||
331 | destroy_rcu_head(&shost->rcu); | ||
332 | |||
333 | if (shost->shost_state == SHOST_CREATED) { | 331 | if (shost->shost_state == SHOST_CREATED) { |
334 | /* | 332 | /* |
335 | * Free the shost_dev device name here if scsi_host_alloc() | 333 | * Free the shost_dev device name here if scsi_host_alloc() |
@@ -404,7 +402,6 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) | |||
404 | INIT_LIST_HEAD(&shost->starved_list); | 402 | INIT_LIST_HEAD(&shost->starved_list); |
405 | init_waitqueue_head(&shost->host_wait); | 403 | init_waitqueue_head(&shost->host_wait); |
406 | mutex_init(&shost->scan_mutex); | 404 | mutex_init(&shost->scan_mutex); |
407 | init_rcu_head(&shost->rcu); | ||
408 | 405 | ||
409 | index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL); | 406 | index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL); |
410 | if (index < 0) | 407 | if (index < 0) |
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 073ced07e662..dc8e850fbfd2 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c | |||
@@ -216,36 +216,30 @@ inline void megasas_return_cmd_fusion(struct megasas_instance *instance, | |||
216 | /** | 216 | /** |
217 | * megasas_fire_cmd_fusion - Sends command to the FW | 217 | * megasas_fire_cmd_fusion - Sends command to the FW |
218 | * @instance: Adapter soft state | 218 | * @instance: Adapter soft state |
219 | * @req_desc: 32bit or 64bit Request descriptor | 219 | * @req_desc: 64bit Request descriptor |
220 | * | 220 | * |
221 | * Perform PCI Write. Ventura supports 32 bit Descriptor. | 221 | * Perform PCI Write. |
222 | * Prior to Ventura (12G) MR controller supports 64 bit Descriptor. | ||
223 | */ | 222 | */ |
224 | 223 | ||
225 | static void | 224 | static void |
226 | megasas_fire_cmd_fusion(struct megasas_instance *instance, | 225 | megasas_fire_cmd_fusion(struct megasas_instance *instance, |
227 | union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc) | 226 | union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc) |
228 | { | 227 | { |
229 | if (instance->adapter_type == VENTURA_SERIES) | ||
230 | writel(le32_to_cpu(req_desc->u.low), | ||
231 | &instance->reg_set->inbound_single_queue_port); | ||
232 | else { | ||
233 | #if defined(writeq) && defined(CONFIG_64BIT) | 228 | #if defined(writeq) && defined(CONFIG_64BIT) |
234 | u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) | | 229 | u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) | |
235 | le32_to_cpu(req_desc->u.low)); | 230 | le32_to_cpu(req_desc->u.low)); |
236 | 231 | ||
237 | writeq(req_data, &instance->reg_set->inbound_low_queue_port); | 232 | writeq(req_data, &instance->reg_set->inbound_low_queue_port); |
238 | #else | 233 | #else |
239 | unsigned long flags; | 234 | unsigned long flags; |
240 | spin_lock_irqsave(&instance->hba_lock, flags); | 235 | spin_lock_irqsave(&instance->hba_lock, flags); |
241 | writel(le32_to_cpu(req_desc->u.low), | 236 | writel(le32_to_cpu(req_desc->u.low), |
242 | &instance->reg_set->inbound_low_queue_port); | 237 | &instance->reg_set->inbound_low_queue_port); |
243 | writel(le32_to_cpu(req_desc->u.high), | 238 | writel(le32_to_cpu(req_desc->u.high), |
244 | &instance->reg_set->inbound_high_queue_port); | 239 | &instance->reg_set->inbound_high_queue_port); |
245 | mmiowb(); | 240 | mmiowb(); |
246 | spin_unlock_irqrestore(&instance->hba_lock, flags); | 241 | spin_unlock_irqrestore(&instance->hba_lock, flags); |
247 | #endif | 242 | #endif |
248 | } | ||
249 | } | 243 | } |
250 | 244 | ||
251 | /** | 245 | /** |
@@ -982,7 +976,6 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) | |||
982 | const char *sys_info; | 976 | const char *sys_info; |
983 | MFI_CAPABILITIES *drv_ops; | 977 | MFI_CAPABILITIES *drv_ops; |
984 | u32 scratch_pad_2; | 978 | u32 scratch_pad_2; |
985 | unsigned long flags; | ||
986 | ktime_t time; | 979 | ktime_t time; |
987 | bool cur_fw_64bit_dma_capable; | 980 | bool cur_fw_64bit_dma_capable; |
988 | 981 | ||
@@ -1121,14 +1114,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) | |||
1121 | break; | 1114 | break; |
1122 | } | 1115 | } |
1123 | 1116 | ||
1124 | /* For Ventura also IOC INIT required 64 bit Descriptor write. */ | 1117 | megasas_fire_cmd_fusion(instance, &req_desc); |
1125 | spin_lock_irqsave(&instance->hba_lock, flags); | ||
1126 | writel(le32_to_cpu(req_desc.u.low), | ||
1127 | &instance->reg_set->inbound_low_queue_port); | ||
1128 | writel(le32_to_cpu(req_desc.u.high), | ||
1129 | &instance->reg_set->inbound_high_queue_port); | ||
1130 | mmiowb(); | ||
1131 | spin_unlock_irqrestore(&instance->hba_lock, flags); | ||
1132 | 1118 | ||
1133 | wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS); | 1119 | wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS); |
1134 | 1120 | ||
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 59a87ca328d3..0aafbfd1b746 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c | |||
@@ -6297,14 +6297,14 @@ _base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase) | |||
6297 | } | 6297 | } |
6298 | 6298 | ||
6299 | /** | 6299 | /** |
6300 | * _wait_for_commands_to_complete - reset controller | 6300 | * mpt3sas_wait_for_commands_to_complete - reset controller |
6301 | * @ioc: Pointer to MPT_ADAPTER structure | 6301 | * @ioc: Pointer to MPT_ADAPTER structure |
6302 | * | 6302 | * |
6303 | * This function is waiting 10s for all pending commands to complete | 6303 | * This function is waiting 10s for all pending commands to complete |
6304 | * prior to putting controller in reset. | 6304 | * prior to putting controller in reset. |
6305 | */ | 6305 | */ |
6306 | static void | 6306 | void |
6307 | _wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc) | 6307 | mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc) |
6308 | { | 6308 | { |
6309 | u32 ioc_state; | 6309 | u32 ioc_state; |
6310 | 6310 | ||
@@ -6377,7 +6377,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, | |||
6377 | is_fault = 1; | 6377 | is_fault = 1; |
6378 | } | 6378 | } |
6379 | _base_reset_handler(ioc, MPT3_IOC_PRE_RESET); | 6379 | _base_reset_handler(ioc, MPT3_IOC_PRE_RESET); |
6380 | _wait_for_commands_to_complete(ioc); | 6380 | mpt3sas_wait_for_commands_to_complete(ioc); |
6381 | _base_mask_interrupts(ioc); | 6381 | _base_mask_interrupts(ioc); |
6382 | r = _base_make_ioc_ready(ioc, type); | 6382 | r = _base_make_ioc_ready(ioc, type); |
6383 | if (r) | 6383 | if (r) |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index 789bc421424b..99ccf83b8c51 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h | |||
@@ -1433,6 +1433,9 @@ void mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc, | |||
1433 | 1433 | ||
1434 | int mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc); | 1434 | int mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc); |
1435 | 1435 | ||
1436 | void | ||
1437 | mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc); | ||
1438 | |||
1436 | 1439 | ||
1437 | /* scsih shared API */ | 1440 | /* scsih shared API */ |
1438 | struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, | 1441 | struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 74fca184dba9..a1cb0236c550 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c | |||
@@ -2835,7 +2835,8 @@ scsih_abort(struct scsi_cmnd *scmd) | |||
2835 | _scsih_tm_display_info(ioc, scmd); | 2835 | _scsih_tm_display_info(ioc, scmd); |
2836 | 2836 | ||
2837 | sas_device_priv_data = scmd->device->hostdata; | 2837 | sas_device_priv_data = scmd->device->hostdata; |
2838 | if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { | 2838 | if (!sas_device_priv_data || !sas_device_priv_data->sas_target || |
2839 | ioc->remove_host) { | ||
2839 | sdev_printk(KERN_INFO, scmd->device, | 2840 | sdev_printk(KERN_INFO, scmd->device, |
2840 | "device been deleted! scmd(%p)\n", scmd); | 2841 | "device been deleted! scmd(%p)\n", scmd); |
2841 | scmd->result = DID_NO_CONNECT << 16; | 2842 | scmd->result = DID_NO_CONNECT << 16; |
@@ -2898,7 +2899,8 @@ scsih_dev_reset(struct scsi_cmnd *scmd) | |||
2898 | _scsih_tm_display_info(ioc, scmd); | 2899 | _scsih_tm_display_info(ioc, scmd); |
2899 | 2900 | ||
2900 | sas_device_priv_data = scmd->device->hostdata; | 2901 | sas_device_priv_data = scmd->device->hostdata; |
2901 | if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { | 2902 | if (!sas_device_priv_data || !sas_device_priv_data->sas_target || |
2903 | ioc->remove_host) { | ||
2902 | sdev_printk(KERN_INFO, scmd->device, | 2904 | sdev_printk(KERN_INFO, scmd->device, |
2903 | "device been deleted! scmd(%p)\n", scmd); | 2905 | "device been deleted! scmd(%p)\n", scmd); |
2904 | scmd->result = DID_NO_CONNECT << 16; | 2906 | scmd->result = DID_NO_CONNECT << 16; |
@@ -2961,7 +2963,8 @@ scsih_target_reset(struct scsi_cmnd *scmd) | |||
2961 | _scsih_tm_display_info(ioc, scmd); | 2963 | _scsih_tm_display_info(ioc, scmd); |
2962 | 2964 | ||
2963 | sas_device_priv_data = scmd->device->hostdata; | 2965 | sas_device_priv_data = scmd->device->hostdata; |
2964 | if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { | 2966 | if (!sas_device_priv_data || !sas_device_priv_data->sas_target || |
2967 | ioc->remove_host) { | ||
2965 | starget_printk(KERN_INFO, starget, "target been deleted! scmd(%p)\n", | 2968 | starget_printk(KERN_INFO, starget, "target been deleted! scmd(%p)\n", |
2966 | scmd); | 2969 | scmd); |
2967 | scmd->result = DID_NO_CONNECT << 16; | 2970 | scmd->result = DID_NO_CONNECT << 16; |
@@ -3019,7 +3022,7 @@ scsih_host_reset(struct scsi_cmnd *scmd) | |||
3019 | ioc->name, scmd); | 3022 | ioc->name, scmd); |
3020 | scsi_print_command(scmd); | 3023 | scsi_print_command(scmd); |
3021 | 3024 | ||
3022 | if (ioc->is_driver_loading) { | 3025 | if (ioc->is_driver_loading || ioc->remove_host) { |
3023 | pr_info(MPT3SAS_FMT "Blocking the host reset\n", | 3026 | pr_info(MPT3SAS_FMT "Blocking the host reset\n", |
3024 | ioc->name); | 3027 | ioc->name); |
3025 | r = FAILED; | 3028 | r = FAILED; |
@@ -4453,7 +4456,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc) | |||
4453 | st = scsi_cmd_priv(scmd); | 4456 | st = scsi_cmd_priv(scmd); |
4454 | mpt3sas_base_clear_st(ioc, st); | 4457 | mpt3sas_base_clear_st(ioc, st); |
4455 | scsi_dma_unmap(scmd); | 4458 | scsi_dma_unmap(scmd); |
4456 | if (ioc->pci_error_recovery) | 4459 | if (ioc->pci_error_recovery || ioc->remove_host) |
4457 | scmd->result = DID_NO_CONNECT << 16; | 4460 | scmd->result = DID_NO_CONNECT << 16; |
4458 | else | 4461 | else |
4459 | scmd->result = DID_RESET << 16; | 4462 | scmd->result = DID_RESET << 16; |
@@ -9739,6 +9742,10 @@ static void scsih_remove(struct pci_dev *pdev) | |||
9739 | unsigned long flags; | 9742 | unsigned long flags; |
9740 | 9743 | ||
9741 | ioc->remove_host = 1; | 9744 | ioc->remove_host = 1; |
9745 | |||
9746 | mpt3sas_wait_for_commands_to_complete(ioc); | ||
9747 | _scsih_flush_running_cmds(ioc); | ||
9748 | |||
9742 | _scsih_fw_event_cleanup_queue(ioc); | 9749 | _scsih_fw_event_cleanup_queue(ioc); |
9743 | 9750 | ||
9744 | spin_lock_irqsave(&ioc->fw_event_lock, flags); | 9751 | spin_lock_irqsave(&ioc->fw_event_lock, flags); |
@@ -9815,6 +9822,10 @@ scsih_shutdown(struct pci_dev *pdev) | |||
9815 | unsigned long flags; | 9822 | unsigned long flags; |
9816 | 9823 | ||
9817 | ioc->remove_host = 1; | 9824 | ioc->remove_host = 1; |
9825 | |||
9826 | mpt3sas_wait_for_commands_to_complete(ioc); | ||
9827 | _scsih_flush_running_cmds(ioc); | ||
9828 | |||
9818 | _scsih_fw_event_cleanup_queue(ioc); | 9829 | _scsih_fw_event_cleanup_queue(ioc); |
9819 | 9830 | ||
9820 | spin_lock_irqsave(&ioc->fw_event_lock, flags); | 9831 | spin_lock_irqsave(&ioc->fw_event_lock, flags); |
@@ -10547,7 +10558,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
10547 | snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name), | 10558 | snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name), |
10548 | "fw_event_%s%d", ioc->driver_name, ioc->id); | 10559 | "fw_event_%s%d", ioc->driver_name, ioc->id); |
10549 | ioc->firmware_event_thread = alloc_ordered_workqueue( | 10560 | ioc->firmware_event_thread = alloc_ordered_workqueue( |
10550 | ioc->firmware_event_name, WQ_MEM_RECLAIM); | 10561 | ioc->firmware_event_name, 0); |
10551 | if (!ioc->firmware_event_thread) { | 10562 | if (!ioc->firmware_event_thread) { |
10552 | pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", | 10563 | pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", |
10553 | ioc->name, __FILE__, __LINE__, __func__); | 10564 | ioc->name, __FILE__, __LINE__, __func__); |
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c index 667d7697ba01..d09afe1b567d 100644 --- a/drivers/scsi/qedi/qedi_fw.c +++ b/drivers/scsi/qedi/qedi_fw.c | |||
@@ -762,6 +762,11 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi, | |||
762 | 762 | ||
763 | iscsi_cid = cqe->conn_id; | 763 | iscsi_cid = cqe->conn_id; |
764 | qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid]; | 764 | qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid]; |
765 | if (!qedi_conn) { | ||
766 | QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, | ||
767 | "icid not found 0x%x\n", cqe->conn_id); | ||
768 | return; | ||
769 | } | ||
765 | 770 | ||
766 | /* Based on this itt get the corresponding qedi_cmd */ | 771 | /* Based on this itt get the corresponding qedi_cmd */ |
767 | spin_lock_bh(&qedi_conn->tmf_work_lock); | 772 | spin_lock_bh(&qedi_conn->tmf_work_lock); |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index be7d6824581a..c9689f97c307 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h | |||
@@ -261,9 +261,9 @@ | |||
261 | struct name_list_extended { | 261 | struct name_list_extended { |
262 | struct get_name_list_extended *l; | 262 | struct get_name_list_extended *l; |
263 | dma_addr_t ldma; | 263 | dma_addr_t ldma; |
264 | struct list_head fcports; /* protect by sess_list */ | 264 | struct list_head fcports; |
265 | spinlock_t fcports_lock; | ||
265 | u32 size; | 266 | u32 size; |
266 | u8 sent; | ||
267 | }; | 267 | }; |
268 | /* | 268 | /* |
269 | * Timeout timer counts in seconds | 269 | * Timeout timer counts in seconds |
@@ -2217,6 +2217,7 @@ typedef struct { | |||
2217 | 2217 | ||
2218 | /* FCP-4 types */ | 2218 | /* FCP-4 types */ |
2219 | #define FC4_TYPE_FCP_SCSI 0x08 | 2219 | #define FC4_TYPE_FCP_SCSI 0x08 |
2220 | #define FC4_TYPE_NVME 0x28 | ||
2220 | #define FC4_TYPE_OTHER 0x0 | 2221 | #define FC4_TYPE_OTHER 0x0 |
2221 | #define FC4_TYPE_UNKNOWN 0xff | 2222 | #define FC4_TYPE_UNKNOWN 0xff |
2222 | 2223 | ||
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 5bf9a59432f6..403fa096f8c8 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c | |||
@@ -3179,6 +3179,7 @@ done_free_sp: | |||
3179 | sp->free(sp); | 3179 | sp->free(sp); |
3180 | fcport->flags &= ~FCF_ASYNC_SENT; | 3180 | fcport->flags &= ~FCF_ASYNC_SENT; |
3181 | done: | 3181 | done: |
3182 | fcport->flags &= ~FCF_ASYNC_ACTIVE; | ||
3182 | return rval; | 3183 | return rval; |
3183 | } | 3184 | } |
3184 | 3185 | ||
@@ -3370,6 +3371,7 @@ done_free_sp: | |||
3370 | sp->free(sp); | 3371 | sp->free(sp); |
3371 | fcport->flags &= ~FCF_ASYNC_SENT; | 3372 | fcport->flags &= ~FCF_ASYNC_SENT; |
3372 | done: | 3373 | done: |
3374 | fcport->flags &= ~FCF_ASYNC_ACTIVE; | ||
3373 | return rval; | 3375 | return rval; |
3374 | } | 3376 | } |
3375 | 3377 | ||
@@ -3971,6 +3973,9 @@ out: | |||
3971 | spin_lock_irqsave(&vha->work_lock, flags); | 3973 | spin_lock_irqsave(&vha->work_lock, flags); |
3972 | vha->scan.scan_flags &= ~SF_SCANNING; | 3974 | vha->scan.scan_flags &= ~SF_SCANNING; |
3973 | spin_unlock_irqrestore(&vha->work_lock, flags); | 3975 | spin_unlock_irqrestore(&vha->work_lock, flags); |
3976 | |||
3977 | if ((fc4type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled) | ||
3978 | qla24xx_async_gpnft(vha, FC4_TYPE_NVME); | ||
3974 | } | 3979 | } |
3975 | 3980 | ||
3976 | static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res) | 3981 | static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res) |
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 2dea1129d396..00329dda6179 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
@@ -213,6 +213,7 @@ done_free_sp: | |||
213 | sp->free(sp); | 213 | sp->free(sp); |
214 | fcport->flags &= ~FCF_ASYNC_SENT; | 214 | fcport->flags &= ~FCF_ASYNC_SENT; |
215 | done: | 215 | done: |
216 | fcport->flags &= ~FCF_ASYNC_ACTIVE; | ||
216 | return rval; | 217 | return rval; |
217 | } | 218 | } |
218 | 219 | ||
@@ -263,7 +264,7 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport) | |||
263 | done_free_sp: | 264 | done_free_sp: |
264 | sp->free(sp); | 265 | sp->free(sp); |
265 | done: | 266 | done: |
266 | fcport->flags &= ~FCF_ASYNC_SENT; | 267 | fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); |
267 | return rval; | 268 | return rval; |
268 | } | 269 | } |
269 | 270 | ||
@@ -271,6 +272,7 @@ void | |||
271 | qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport, | 272 | qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport, |
272 | uint16_t *data) | 273 | uint16_t *data) |
273 | { | 274 | { |
275 | fcport->flags &= ~FCF_ASYNC_ACTIVE; | ||
274 | /* Don't re-login in target mode */ | 276 | /* Don't re-login in target mode */ |
275 | if (!fcport->tgt_session) | 277 | if (!fcport->tgt_session) |
276 | qla2x00_mark_device_lost(vha, fcport, 1, 0); | 278 | qla2x00_mark_device_lost(vha, fcport, 1, 0); |
@@ -284,6 +286,7 @@ qla2x00_async_prlo_sp_done(void *s, int res) | |||
284 | struct srb_iocb *lio = &sp->u.iocb_cmd; | 286 | struct srb_iocb *lio = &sp->u.iocb_cmd; |
285 | struct scsi_qla_host *vha = sp->vha; | 287 | struct scsi_qla_host *vha = sp->vha; |
286 | 288 | ||
289 | sp->fcport->flags &= ~FCF_ASYNC_ACTIVE; | ||
287 | if (!test_bit(UNLOADING, &vha->dpc_flags)) | 290 | if (!test_bit(UNLOADING, &vha->dpc_flags)) |
288 | qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport, | 291 | qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport, |
289 | lio->u.logio.data); | 292 | lio->u.logio.data); |
@@ -322,6 +325,7 @@ qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport) | |||
322 | done_free_sp: | 325 | done_free_sp: |
323 | sp->free(sp); | 326 | sp->free(sp); |
324 | done: | 327 | done: |
328 | fcport->flags &= ~FCF_ASYNC_ACTIVE; | ||
325 | return rval; | 329 | return rval; |
326 | } | 330 | } |
327 | 331 | ||
@@ -375,6 +379,8 @@ qla2x00_async_adisc_sp_done(void *ptr, int res) | |||
375 | "Async done-%s res %x %8phC\n", | 379 | "Async done-%s res %x %8phC\n", |
376 | sp->name, res, sp->fcport->port_name); | 380 | sp->name, res, sp->fcport->port_name); |
377 | 381 | ||
382 | sp->fcport->flags &= ~FCF_ASYNC_SENT; | ||
383 | |||
378 | memset(&ea, 0, sizeof(ea)); | 384 | memset(&ea, 0, sizeof(ea)); |
379 | ea.event = FCME_ADISC_DONE; | 385 | ea.event = FCME_ADISC_DONE; |
380 | ea.rc = res; | 386 | ea.rc = res; |
@@ -425,7 +431,7 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport, | |||
425 | done_free_sp: | 431 | done_free_sp: |
426 | sp->free(sp); | 432 | sp->free(sp); |
427 | done: | 433 | done: |
428 | fcport->flags &= ~FCF_ASYNC_SENT; | 434 | fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); |
429 | qla2x00_post_async_adisc_work(vha, fcport, data); | 435 | qla2x00_post_async_adisc_work(vha, fcport, data); |
430 | return rval; | 436 | return rval; |
431 | } | 437 | } |
@@ -643,8 +649,7 @@ qla24xx_async_gnl_sp_done(void *s, int res) | |||
643 | (loop_id & 0x7fff)); | 649 | (loop_id & 0x7fff)); |
644 | } | 650 | } |
645 | 651 | ||
646 | spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); | 652 | spin_lock_irqsave(&vha->gnl.fcports_lock, flags); |
647 | vha->gnl.sent = 0; | ||
648 | 653 | ||
649 | INIT_LIST_HEAD(&h); | 654 | INIT_LIST_HEAD(&h); |
650 | fcport = tf = NULL; | 655 | fcport = tf = NULL; |
@@ -653,12 +658,16 @@ qla24xx_async_gnl_sp_done(void *s, int res) | |||
653 | 658 | ||
654 | list_for_each_entry_safe(fcport, tf, &h, gnl_entry) { | 659 | list_for_each_entry_safe(fcport, tf, &h, gnl_entry) { |
655 | list_del_init(&fcport->gnl_entry); | 660 | list_del_init(&fcport->gnl_entry); |
661 | spin_lock(&vha->hw->tgt.sess_lock); | ||
656 | fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); | 662 | fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); |
663 | spin_unlock(&vha->hw->tgt.sess_lock); | ||
657 | ea.fcport = fcport; | 664 | ea.fcport = fcport; |
658 | 665 | ||
659 | qla2x00_fcport_event_handler(vha, &ea); | 666 | qla2x00_fcport_event_handler(vha, &ea); |
660 | } | 667 | } |
668 | spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags); | ||
661 | 669 | ||
670 | spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); | ||
662 | /* create new fcport if fw has knowledge of new sessions */ | 671 | /* create new fcport if fw has knowledge of new sessions */ |
663 | for (i = 0; i < n; i++) { | 672 | for (i = 0; i < n; i++) { |
664 | port_id_t id; | 673 | port_id_t id; |
@@ -710,18 +719,21 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport) | |||
710 | ql_dbg(ql_dbg_disc, vha, 0x20d9, | 719 | ql_dbg(ql_dbg_disc, vha, 0x20d9, |
711 | "Async-gnlist WWPN %8phC \n", fcport->port_name); | 720 | "Async-gnlist WWPN %8phC \n", fcport->port_name); |
712 | 721 | ||
713 | spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); | 722 | spin_lock_irqsave(&vha->gnl.fcports_lock, flags); |
723 | if (!list_empty(&fcport->gnl_entry)) { | ||
724 | spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags); | ||
725 | rval = QLA_SUCCESS; | ||
726 | goto done; | ||
727 | } | ||
728 | |||
729 | spin_lock(&vha->hw->tgt.sess_lock); | ||
714 | fcport->disc_state = DSC_GNL; | 730 | fcport->disc_state = DSC_GNL; |
715 | fcport->last_rscn_gen = fcport->rscn_gen; | 731 | fcport->last_rscn_gen = fcport->rscn_gen; |
716 | fcport->last_login_gen = fcport->login_gen; | 732 | fcport->last_login_gen = fcport->login_gen; |
733 | spin_unlock(&vha->hw->tgt.sess_lock); | ||
717 | 734 | ||
718 | list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports); | 735 | list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports); |
719 | if (vha->gnl.sent) { | 736 | spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags); |
720 | spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); | ||
721 | return QLA_SUCCESS; | ||
722 | } | ||
723 | vha->gnl.sent = 1; | ||
724 | spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); | ||
725 | 737 | ||
726 | sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); | 738 | sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); |
727 | if (!sp) | 739 | if (!sp) |
@@ -1049,6 +1061,7 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) | |||
1049 | fc_port_t *fcport = ea->fcport; | 1061 | fc_port_t *fcport = ea->fcport; |
1050 | struct port_database_24xx *pd; | 1062 | struct port_database_24xx *pd; |
1051 | struct srb *sp = ea->sp; | 1063 | struct srb *sp = ea->sp; |
1064 | uint8_t ls; | ||
1052 | 1065 | ||
1053 | pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in; | 1066 | pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in; |
1054 | 1067 | ||
@@ -1061,7 +1074,12 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) | |||
1061 | if (fcport->disc_state == DSC_DELETE_PEND) | 1074 | if (fcport->disc_state == DSC_DELETE_PEND) |
1062 | return; | 1075 | return; |
1063 | 1076 | ||
1064 | switch (pd->current_login_state) { | 1077 | if (fcport->fc4f_nvme) |
1078 | ls = pd->current_login_state >> 4; | ||
1079 | else | ||
1080 | ls = pd->current_login_state & 0xf; | ||
1081 | |||
1082 | switch (ls) { | ||
1065 | case PDS_PRLI_COMPLETE: | 1083 | case PDS_PRLI_COMPLETE: |
1066 | __qla24xx_parse_gpdb(vha, fcport, pd); | 1084 | __qla24xx_parse_gpdb(vha, fcport, pd); |
1067 | break; | 1085 | break; |
@@ -1151,8 +1169,9 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) | |||
1151 | if (fcport->scan_state != QLA_FCPORT_FOUND) | 1169 | if (fcport->scan_state != QLA_FCPORT_FOUND) |
1152 | return 0; | 1170 | return 0; |
1153 | 1171 | ||
1154 | if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || | 1172 | if ((fcport->loop_id != FC_NO_LOOP_ID) && |
1155 | (fcport->fw_login_state == DSC_LS_PRLI_PEND)) | 1173 | ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || |
1174 | (fcport->fw_login_state == DSC_LS_PRLI_PEND))) | ||
1156 | return 0; | 1175 | return 0; |
1157 | 1176 | ||
1158 | if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) { | 1177 | if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) { |
@@ -1527,6 +1546,7 @@ qla24xx_abort_sp_done(void *ptr, int res) | |||
1527 | srb_t *sp = ptr; | 1546 | srb_t *sp = ptr; |
1528 | struct srb_iocb *abt = &sp->u.iocb_cmd; | 1547 | struct srb_iocb *abt = &sp->u.iocb_cmd; |
1529 | 1548 | ||
1549 | del_timer(&sp->u.iocb_cmd.timer); | ||
1530 | complete(&abt->u.abt.comp); | 1550 | complete(&abt->u.abt.comp); |
1531 | } | 1551 | } |
1532 | 1552 | ||
@@ -1791,6 +1811,7 @@ qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport, | |||
1791 | qla2x00_mark_device_lost(vha, fcport, 1, 0); | 1811 | qla2x00_mark_device_lost(vha, fcport, 1, 0); |
1792 | qlt_logo_completion_handler(fcport, data[0]); | 1812 | qlt_logo_completion_handler(fcport, data[0]); |
1793 | fcport->login_gen++; | 1813 | fcport->login_gen++; |
1814 | fcport->flags &= ~FCF_ASYNC_ACTIVE; | ||
1794 | return; | 1815 | return; |
1795 | } | 1816 | } |
1796 | 1817 | ||
@@ -1798,6 +1819,7 @@ void | |||
1798 | qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport, | 1819 | qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport, |
1799 | uint16_t *data) | 1820 | uint16_t *data) |
1800 | { | 1821 | { |
1822 | fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); | ||
1801 | if (data[0] == MBS_COMMAND_COMPLETE) { | 1823 | if (data[0] == MBS_COMMAND_COMPLETE) { |
1802 | qla2x00_update_fcport(vha, fcport); | 1824 | qla2x00_update_fcport(vha, fcport); |
1803 | 1825 | ||
@@ -1805,7 +1827,6 @@ qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport, | |||
1805 | } | 1827 | } |
1806 | 1828 | ||
1807 | /* Retry login. */ | 1829 | /* Retry login. */ |
1808 | fcport->flags &= ~FCF_ASYNC_SENT; | ||
1809 | if (data[1] & QLA_LOGIO_LOGIN_RETRIED) | 1830 | if (data[1] & QLA_LOGIO_LOGIN_RETRIED) |
1810 | set_bit(RELOGIN_NEEDED, &vha->dpc_flags); | 1831 | set_bit(RELOGIN_NEEDED, &vha->dpc_flags); |
1811 | else | 1832 | else |
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index afcb5567998a..5c5dcca4d1da 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -454,7 +454,7 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req, | |||
454 | ha->req_q_map[0] = req; | 454 | ha->req_q_map[0] = req; |
455 | set_bit(0, ha->rsp_qid_map); | 455 | set_bit(0, ha->rsp_qid_map); |
456 | set_bit(0, ha->req_qid_map); | 456 | set_bit(0, ha->req_qid_map); |
457 | return 1; | 457 | return 0; |
458 | 458 | ||
459 | fail_qpair_map: | 459 | fail_qpair_map: |
460 | kfree(ha->base_qpair); | 460 | kfree(ha->base_qpair); |
@@ -471,6 +471,9 @@ fail_req_map: | |||
471 | 471 | ||
472 | static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req) | 472 | static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req) |
473 | { | 473 | { |
474 | if (!ha->req_q_map) | ||
475 | return; | ||
476 | |||
474 | if (IS_QLAFX00(ha)) { | 477 | if (IS_QLAFX00(ha)) { |
475 | if (req && req->ring_fx00) | 478 | if (req && req->ring_fx00) |
476 | dma_free_coherent(&ha->pdev->dev, | 479 | dma_free_coherent(&ha->pdev->dev, |
@@ -481,14 +484,17 @@ static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req) | |||
481 | (req->length + 1) * sizeof(request_t), | 484 | (req->length + 1) * sizeof(request_t), |
482 | req->ring, req->dma); | 485 | req->ring, req->dma); |
483 | 486 | ||
484 | if (req) | 487 | if (req) { |
485 | kfree(req->outstanding_cmds); | 488 | kfree(req->outstanding_cmds); |
486 | 489 | kfree(req); | |
487 | kfree(req); | 490 | } |
488 | } | 491 | } |
489 | 492 | ||
490 | static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp) | 493 | static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp) |
491 | { | 494 | { |
495 | if (!ha->rsp_q_map) | ||
496 | return; | ||
497 | |||
492 | if (IS_QLAFX00(ha)) { | 498 | if (IS_QLAFX00(ha)) { |
493 | if (rsp && rsp->ring) | 499 | if (rsp && rsp->ring) |
494 | dma_free_coherent(&ha->pdev->dev, | 500 | dma_free_coherent(&ha->pdev->dev, |
@@ -499,7 +505,8 @@ static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp) | |||
499 | (rsp->length + 1) * sizeof(response_t), | 505 | (rsp->length + 1) * sizeof(response_t), |
500 | rsp->ring, rsp->dma); | 506 | rsp->ring, rsp->dma); |
501 | } | 507 | } |
502 | kfree(rsp); | 508 | if (rsp) |
509 | kfree(rsp); | ||
503 | } | 510 | } |
504 | 511 | ||
505 | static void qla2x00_free_queues(struct qla_hw_data *ha) | 512 | static void qla2x00_free_queues(struct qla_hw_data *ha) |
@@ -1723,6 +1730,8 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res) | |||
1723 | struct qla_tgt_cmd *cmd; | 1730 | struct qla_tgt_cmd *cmd; |
1724 | uint8_t trace = 0; | 1731 | uint8_t trace = 0; |
1725 | 1732 | ||
1733 | if (!ha->req_q_map) | ||
1734 | return; | ||
1726 | spin_lock_irqsave(qp->qp_lock_ptr, flags); | 1735 | spin_lock_irqsave(qp->qp_lock_ptr, flags); |
1727 | req = qp->req; | 1736 | req = qp->req; |
1728 | for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { | 1737 | for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { |
@@ -3095,14 +3104,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
3095 | /* Set up the irqs */ | 3104 | /* Set up the irqs */ |
3096 | ret = qla2x00_request_irqs(ha, rsp); | 3105 | ret = qla2x00_request_irqs(ha, rsp); |
3097 | if (ret) | 3106 | if (ret) |
3098 | goto probe_hw_failed; | 3107 | goto probe_failed; |
3099 | 3108 | ||
3100 | /* Alloc arrays of request and response ring ptrs */ | 3109 | /* Alloc arrays of request and response ring ptrs */ |
3101 | if (!qla2x00_alloc_queues(ha, req, rsp)) { | 3110 | if (qla2x00_alloc_queues(ha, req, rsp)) { |
3102 | ql_log(ql_log_fatal, base_vha, 0x003d, | 3111 | ql_log(ql_log_fatal, base_vha, 0x003d, |
3103 | "Failed to allocate memory for queue pointers..." | 3112 | "Failed to allocate memory for queue pointers..." |
3104 | "aborting.\n"); | 3113 | "aborting.\n"); |
3105 | goto probe_init_failed; | 3114 | goto probe_failed; |
3106 | } | 3115 | } |
3107 | 3116 | ||
3108 | if (ha->mqenable && shost_use_blk_mq(host)) { | 3117 | if (ha->mqenable && shost_use_blk_mq(host)) { |
@@ -3387,15 +3396,6 @@ skip_dpc: | |||
3387 | 3396 | ||
3388 | return 0; | 3397 | return 0; |
3389 | 3398 | ||
3390 | probe_init_failed: | ||
3391 | qla2x00_free_req_que(ha, req); | ||
3392 | ha->req_q_map[0] = NULL; | ||
3393 | clear_bit(0, ha->req_qid_map); | ||
3394 | qla2x00_free_rsp_que(ha, rsp); | ||
3395 | ha->rsp_q_map[0] = NULL; | ||
3396 | clear_bit(0, ha->rsp_qid_map); | ||
3397 | ha->max_req_queues = ha->max_rsp_queues = 0; | ||
3398 | |||
3399 | probe_failed: | 3399 | probe_failed: |
3400 | if (base_vha->timer_active) | 3400 | if (base_vha->timer_active) |
3401 | qla2x00_stop_timer(base_vha); | 3401 | qla2x00_stop_timer(base_vha); |
@@ -4508,11 +4508,17 @@ qla2x00_mem_free(struct qla_hw_data *ha) | |||
4508 | if (ha->init_cb) | 4508 | if (ha->init_cb) |
4509 | dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, | 4509 | dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, |
4510 | ha->init_cb, ha->init_cb_dma); | 4510 | ha->init_cb, ha->init_cb_dma); |
4511 | vfree(ha->optrom_buffer); | 4511 | |
4512 | kfree(ha->nvram); | 4512 | if (ha->optrom_buffer) |
4513 | kfree(ha->npiv_info); | 4513 | vfree(ha->optrom_buffer); |
4514 | kfree(ha->swl); | 4514 | if (ha->nvram) |
4515 | kfree(ha->loop_id_map); | 4515 | kfree(ha->nvram); |
4516 | if (ha->npiv_info) | ||
4517 | kfree(ha->npiv_info); | ||
4518 | if (ha->swl) | ||
4519 | kfree(ha->swl); | ||
4520 | if (ha->loop_id_map) | ||
4521 | kfree(ha->loop_id_map); | ||
4516 | 4522 | ||
4517 | ha->srb_mempool = NULL; | 4523 | ha->srb_mempool = NULL; |
4518 | ha->ctx_mempool = NULL; | 4524 | ha->ctx_mempool = NULL; |
@@ -4528,6 +4534,15 @@ qla2x00_mem_free(struct qla_hw_data *ha) | |||
4528 | ha->ex_init_cb_dma = 0; | 4534 | ha->ex_init_cb_dma = 0; |
4529 | ha->async_pd = NULL; | 4535 | ha->async_pd = NULL; |
4530 | ha->async_pd_dma = 0; | 4536 | ha->async_pd_dma = 0; |
4537 | ha->loop_id_map = NULL; | ||
4538 | ha->npiv_info = NULL; | ||
4539 | ha->optrom_buffer = NULL; | ||
4540 | ha->swl = NULL; | ||
4541 | ha->nvram = NULL; | ||
4542 | ha->mctp_dump = NULL; | ||
4543 | ha->dcbx_tlv = NULL; | ||
4544 | ha->xgmac_data = NULL; | ||
4545 | ha->sfp_data = NULL; | ||
4531 | 4546 | ||
4532 | ha->s_dma_pool = NULL; | 4547 | ha->s_dma_pool = NULL; |
4533 | ha->dl_dma_pool = NULL; | 4548 | ha->dl_dma_pool = NULL; |
@@ -4577,6 +4592,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, | |||
4577 | 4592 | ||
4578 | spin_lock_init(&vha->work_lock); | 4593 | spin_lock_init(&vha->work_lock); |
4579 | spin_lock_init(&vha->cmd_list_lock); | 4594 | spin_lock_init(&vha->cmd_list_lock); |
4595 | spin_lock_init(&vha->gnl.fcports_lock); | ||
4580 | init_waitqueue_head(&vha->fcport_waitQ); | 4596 | init_waitqueue_head(&vha->fcport_waitQ); |
4581 | init_waitqueue_head(&vha->vref_waitq); | 4597 | init_waitqueue_head(&vha->vref_waitq); |
4582 | 4598 | ||
@@ -4806,9 +4822,12 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) | |||
4806 | fcport->d_id = e->u.new_sess.id; | 4822 | fcport->d_id = e->u.new_sess.id; |
4807 | fcport->flags |= FCF_FABRIC_DEVICE; | 4823 | fcport->flags |= FCF_FABRIC_DEVICE; |
4808 | fcport->fw_login_state = DSC_LS_PLOGI_PEND; | 4824 | fcport->fw_login_state = DSC_LS_PLOGI_PEND; |
4809 | if (e->u.new_sess.fc4_type == FC4_TYPE_FCP_SCSI) | 4825 | if (e->u.new_sess.fc4_type == FC4_TYPE_FCP_SCSI) { |
4810 | fcport->fc4_type = FC4_TYPE_FCP_SCSI; | 4826 | fcport->fc4_type = FC4_TYPE_FCP_SCSI; |
4811 | 4827 | } else if (e->u.new_sess.fc4_type == FC4_TYPE_NVME) { | |
4828 | fcport->fc4_type = FC4_TYPE_OTHER; | ||
4829 | fcport->fc4f_nvme = FC4_TYPE_NVME; | ||
4830 | } | ||
4812 | memcpy(fcport->port_name, e->u.new_sess.port_name, | 4831 | memcpy(fcport->port_name, e->u.new_sess.port_name, |
4813 | WWN_SIZE); | 4832 | WWN_SIZE); |
4814 | } else { | 4833 | } else { |
@@ -4877,6 +4896,8 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) | |||
4877 | } | 4896 | } |
4878 | qlt_plogi_ack_unref(vha, pla); | 4897 | qlt_plogi_ack_unref(vha, pla); |
4879 | } else { | 4898 | } else { |
4899 | fc_port_t *dfcp = NULL; | ||
4900 | |||
4880 | spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); | 4901 | spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); |
4881 | tfcp = qla2x00_find_fcport_by_nportid(vha, | 4902 | tfcp = qla2x00_find_fcport_by_nportid(vha, |
4882 | &e->u.new_sess.id, 1); | 4903 | &e->u.new_sess.id, 1); |
@@ -4899,11 +4920,13 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) | |||
4899 | default: | 4920 | default: |
4900 | fcport->login_pause = 1; | 4921 | fcport->login_pause = 1; |
4901 | tfcp->conflict = fcport; | 4922 | tfcp->conflict = fcport; |
4902 | qlt_schedule_sess_for_deletion(tfcp); | 4923 | dfcp = tfcp; |
4903 | break; | 4924 | break; |
4904 | } | 4925 | } |
4905 | } | 4926 | } |
4906 | spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); | 4927 | spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); |
4928 | if (dfcp) | ||
4929 | qlt_schedule_sess_for_deletion(tfcp); | ||
4907 | 4930 | ||
4908 | wwn = wwn_to_u64(fcport->node_name); | 4931 | wwn = wwn_to_u64(fcport->node_name); |
4909 | 4932 | ||
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 896b2d8bd803..b49ac85f3de2 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c | |||
@@ -1224,10 +1224,10 @@ static void qla24xx_chk_fcp_state(struct fc_port *sess) | |||
1224 | } | 1224 | } |
1225 | } | 1225 | } |
1226 | 1226 | ||
1227 | /* ha->tgt.sess_lock supposed to be held on entry */ | ||
1228 | void qlt_schedule_sess_for_deletion(struct fc_port *sess) | 1227 | void qlt_schedule_sess_for_deletion(struct fc_port *sess) |
1229 | { | 1228 | { |
1230 | struct qla_tgt *tgt = sess->tgt; | 1229 | struct qla_tgt *tgt = sess->tgt; |
1230 | struct qla_hw_data *ha = sess->vha->hw; | ||
1231 | unsigned long flags; | 1231 | unsigned long flags; |
1232 | 1232 | ||
1233 | if (sess->disc_state == DSC_DELETE_PEND) | 1233 | if (sess->disc_state == DSC_DELETE_PEND) |
@@ -1244,16 +1244,16 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess) | |||
1244 | return; | 1244 | return; |
1245 | } | 1245 | } |
1246 | 1246 | ||
1247 | spin_lock_irqsave(&ha->tgt.sess_lock, flags); | ||
1247 | if (sess->deleted == QLA_SESS_DELETED) | 1248 | if (sess->deleted == QLA_SESS_DELETED) |
1248 | sess->logout_on_delete = 0; | 1249 | sess->logout_on_delete = 0; |
1249 | 1250 | ||
1250 | spin_lock_irqsave(&sess->vha->work_lock, flags); | ||
1251 | if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { | 1251 | if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { |
1252 | spin_unlock_irqrestore(&sess->vha->work_lock, flags); | 1252 | spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); |
1253 | return; | 1253 | return; |
1254 | } | 1254 | } |
1255 | sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; | 1255 | sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; |
1256 | spin_unlock_irqrestore(&sess->vha->work_lock, flags); | 1256 | spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); |
1257 | 1257 | ||
1258 | sess->disc_state = DSC_DELETE_PEND; | 1258 | sess->disc_state = DSC_DELETE_PEND; |
1259 | 1259 | ||
@@ -1262,13 +1262,10 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess) | |||
1262 | ql_dbg(ql_dbg_tgt, sess->vha, 0xe001, | 1262 | ql_dbg(ql_dbg_tgt, sess->vha, 0xe001, |
1263 | "Scheduling sess %p for deletion\n", sess); | 1263 | "Scheduling sess %p for deletion\n", sess); |
1264 | 1264 | ||
1265 | /* use cancel to push work element through before re-queue */ | ||
1266 | cancel_work_sync(&sess->del_work); | ||
1267 | INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn); | 1265 | INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn); |
1268 | queue_work(sess->vha->hw->wq, &sess->del_work); | 1266 | WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work)); |
1269 | } | 1267 | } |
1270 | 1268 | ||
1271 | /* ha->tgt.sess_lock supposed to be held on entry */ | ||
1272 | static void qlt_clear_tgt_db(struct qla_tgt *tgt) | 1269 | static void qlt_clear_tgt_db(struct qla_tgt *tgt) |
1273 | { | 1270 | { |
1274 | struct fc_port *sess; | 1271 | struct fc_port *sess; |
@@ -1451,8 +1448,8 @@ qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen) | |||
1451 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess); | 1448 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess); |
1452 | 1449 | ||
1453 | sess->local = 1; | 1450 | sess->local = 1; |
1454 | qlt_schedule_sess_for_deletion(sess); | ||
1455 | spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); | 1451 | spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); |
1452 | qlt_schedule_sess_for_deletion(sess); | ||
1456 | } | 1453 | } |
1457 | 1454 | ||
1458 | static inline int test_tgt_sess_count(struct qla_tgt *tgt) | 1455 | static inline int test_tgt_sess_count(struct qla_tgt *tgt) |
@@ -1512,10 +1509,8 @@ int qlt_stop_phase1(struct qla_tgt *tgt) | |||
1512 | * Lock is needed, because we still can get an incoming packet. | 1509 | * Lock is needed, because we still can get an incoming packet. |
1513 | */ | 1510 | */ |
1514 | mutex_lock(&vha->vha_tgt.tgt_mutex); | 1511 | mutex_lock(&vha->vha_tgt.tgt_mutex); |
1515 | spin_lock_irqsave(&ha->tgt.sess_lock, flags); | ||
1516 | tgt->tgt_stop = 1; | 1512 | tgt->tgt_stop = 1; |
1517 | qlt_clear_tgt_db(tgt); | 1513 | qlt_clear_tgt_db(tgt); |
1518 | spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); | ||
1519 | mutex_unlock(&vha->vha_tgt.tgt_mutex); | 1514 | mutex_unlock(&vha->vha_tgt.tgt_mutex); |
1520 | mutex_unlock(&qla_tgt_mutex); | 1515 | mutex_unlock(&qla_tgt_mutex); |
1521 | 1516 | ||
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index d042915ce895..ca53a5f785ee 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c | |||
@@ -223,7 +223,8 @@ static void scsi_eh_reset(struct scsi_cmnd *scmd) | |||
223 | 223 | ||
224 | static void scsi_eh_inc_host_failed(struct rcu_head *head) | 224 | static void scsi_eh_inc_host_failed(struct rcu_head *head) |
225 | { | 225 | { |
226 | struct Scsi_Host *shost = container_of(head, typeof(*shost), rcu); | 226 | struct scsi_cmnd *scmd = container_of(head, typeof(*scmd), rcu); |
227 | struct Scsi_Host *shost = scmd->device->host; | ||
227 | unsigned long flags; | 228 | unsigned long flags; |
228 | 229 | ||
229 | spin_lock_irqsave(shost->host_lock, flags); | 230 | spin_lock_irqsave(shost->host_lock, flags); |
@@ -259,7 +260,7 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd) | |||
259 | * Ensure that all tasks observe the host state change before the | 260 | * Ensure that all tasks observe the host state change before the |
260 | * host_failed change. | 261 | * host_failed change. |
261 | */ | 262 | */ |
262 | call_rcu(&shost->rcu, scsi_eh_inc_host_failed); | 263 | call_rcu(&scmd->rcu, scsi_eh_inc_host_failed); |
263 | } | 264 | } |
264 | 265 | ||
265 | /** | 266 | /** |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index a86df9ca7d1c..c84f931388f2 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -671,6 +671,7 @@ static bool scsi_end_request(struct request *req, blk_status_t error, | |||
671 | if (!blk_rq_is_scsi(req)) { | 671 | if (!blk_rq_is_scsi(req)) { |
672 | WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED)); | 672 | WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED)); |
673 | cmd->flags &= ~SCMD_INITIALIZED; | 673 | cmd->flags &= ~SCMD_INITIALIZED; |
674 | destroy_rcu_head(&cmd->rcu); | ||
674 | } | 675 | } |
675 | 676 | ||
676 | if (req->mq_ctx) { | 677 | if (req->mq_ctx) { |
@@ -720,6 +721,8 @@ static blk_status_t __scsi_error_from_host_byte(struct scsi_cmnd *cmd, | |||
720 | int result) | 721 | int result) |
721 | { | 722 | { |
722 | switch (host_byte(result)) { | 723 | switch (host_byte(result)) { |
724 | case DID_OK: | ||
725 | return BLK_STS_OK; | ||
723 | case DID_TRANSPORT_FAILFAST: | 726 | case DID_TRANSPORT_FAILFAST: |
724 | return BLK_STS_TRANSPORT; | 727 | return BLK_STS_TRANSPORT; |
725 | case DID_TARGET_FAILURE: | 728 | case DID_TARGET_FAILURE: |
@@ -1151,6 +1154,7 @@ static void scsi_initialize_rq(struct request *rq) | |||
1151 | struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); | 1154 | struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); |
1152 | 1155 | ||
1153 | scsi_req_init(&cmd->req); | 1156 | scsi_req_init(&cmd->req); |
1157 | init_rcu_head(&cmd->rcu); | ||
1154 | cmd->jiffies_at_alloc = jiffies; | 1158 | cmd->jiffies_at_alloc = jiffies; |
1155 | cmd->retries = 0; | 1159 | cmd->retries = 0; |
1156 | } | 1160 | } |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index bff21e636ddd..3541caf3fceb 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -2595,6 +2595,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer) | |||
2595 | int res; | 2595 | int res; |
2596 | struct scsi_device *sdp = sdkp->device; | 2596 | struct scsi_device *sdp = sdkp->device; |
2597 | struct scsi_mode_data data; | 2597 | struct scsi_mode_data data; |
2598 | int disk_ro = get_disk_ro(sdkp->disk); | ||
2598 | int old_wp = sdkp->write_prot; | 2599 | int old_wp = sdkp->write_prot; |
2599 | 2600 | ||
2600 | set_disk_ro(sdkp->disk, 0); | 2601 | set_disk_ro(sdkp->disk, 0); |
@@ -2635,7 +2636,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer) | |||
2635 | "Test WP failed, assume Write Enabled\n"); | 2636 | "Test WP failed, assume Write Enabled\n"); |
2636 | } else { | 2637 | } else { |
2637 | sdkp->write_prot = ((data.device_specific & 0x80) != 0); | 2638 | sdkp->write_prot = ((data.device_specific & 0x80) != 0); |
2638 | set_disk_ro(sdkp->disk, sdkp->write_prot); | 2639 | set_disk_ro(sdkp->disk, sdkp->write_prot || disk_ro); |
2639 | if (sdkp->first_scan || old_wp != sdkp->write_prot) { | 2640 | if (sdkp->first_scan || old_wp != sdkp->write_prot) { |
2640 | sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n", | 2641 | sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n", |
2641 | sdkp->write_prot ? "on" : "off"); | 2642 | sdkp->write_prot ? "on" : "off"); |
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index 6c348a211ebb..89cf4498f535 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c | |||
@@ -403,7 +403,7 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf) | |||
403 | */ | 403 | */ |
404 | static int sd_zbc_check_zone_size(struct scsi_disk *sdkp) | 404 | static int sd_zbc_check_zone_size(struct scsi_disk *sdkp) |
405 | { | 405 | { |
406 | u64 zone_blocks; | 406 | u64 zone_blocks = 0; |
407 | sector_t block = 0; | 407 | sector_t block = 0; |
408 | unsigned char *buf; | 408 | unsigned char *buf; |
409 | unsigned char *rec; | 409 | unsigned char *rec; |
@@ -421,10 +421,8 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp) | |||
421 | 421 | ||
422 | /* Do a report zone to get the same field */ | 422 | /* Do a report zone to get the same field */ |
423 | ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 0); | 423 | ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 0); |
424 | if (ret) { | 424 | if (ret) |
425 | zone_blocks = 0; | 425 | goto out_free; |
426 | goto out; | ||
427 | } | ||
428 | 426 | ||
429 | same = buf[4] & 0x0f; | 427 | same = buf[4] & 0x0f; |
430 | if (same > 0) { | 428 | if (same > 0) { |
@@ -464,7 +462,7 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp) | |||
464 | ret = sd_zbc_report_zones(sdkp, buf, | 462 | ret = sd_zbc_report_zones(sdkp, buf, |
465 | SD_ZBC_BUF_SIZE, block); | 463 | SD_ZBC_BUF_SIZE, block); |
466 | if (ret) | 464 | if (ret) |
467 | return ret; | 465 | goto out_free; |
468 | } | 466 | } |
469 | 467 | ||
470 | } while (block < sdkp->capacity); | 468 | } while (block < sdkp->capacity); |
@@ -472,35 +470,32 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp) | |||
472 | zone_blocks = sdkp->zone_blocks; | 470 | zone_blocks = sdkp->zone_blocks; |
473 | 471 | ||
474 | out: | 472 | out: |
475 | kfree(buf); | ||
476 | |||
477 | if (!zone_blocks) { | 473 | if (!zone_blocks) { |
478 | if (sdkp->first_scan) | 474 | if (sdkp->first_scan) |
479 | sd_printk(KERN_NOTICE, sdkp, | 475 | sd_printk(KERN_NOTICE, sdkp, |
480 | "Devices with non constant zone " | 476 | "Devices with non constant zone " |
481 | "size are not supported\n"); | 477 | "size are not supported\n"); |
482 | return -ENODEV; | 478 | ret = -ENODEV; |
483 | } | 479 | } else if (!is_power_of_2(zone_blocks)) { |
484 | |||
485 | if (!is_power_of_2(zone_blocks)) { | ||
486 | if (sdkp->first_scan) | 480 | if (sdkp->first_scan) |
487 | sd_printk(KERN_NOTICE, sdkp, | 481 | sd_printk(KERN_NOTICE, sdkp, |
488 | "Devices with non power of 2 zone " | 482 | "Devices with non power of 2 zone " |
489 | "size are not supported\n"); | 483 | "size are not supported\n"); |
490 | return -ENODEV; | 484 | ret = -ENODEV; |
491 | } | 485 | } else if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) { |
492 | |||
493 | if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) { | ||
494 | if (sdkp->first_scan) | 486 | if (sdkp->first_scan) |
495 | sd_printk(KERN_NOTICE, sdkp, | 487 | sd_printk(KERN_NOTICE, sdkp, |
496 | "Zone size too large\n"); | 488 | "Zone size too large\n"); |
497 | return -ENODEV; | 489 | ret = -ENODEV; |
490 | } else { | ||
491 | sdkp->zone_blocks = zone_blocks; | ||
492 | sdkp->zone_shift = ilog2(zone_blocks); | ||
498 | } | 493 | } |
499 | 494 | ||
500 | sdkp->zone_blocks = zone_blocks; | 495 | out_free: |
501 | sdkp->zone_shift = ilog2(zone_blocks); | 496 | kfree(buf); |
502 | 497 | ||
503 | return 0; | 498 | return ret; |
504 | } | 499 | } |
505 | 500 | ||
506 | /** | 501 | /** |
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 6be5ab32c94f..8c51d628b52e 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c | |||
@@ -1311,7 +1311,8 @@ static int storvsc_do_io(struct hv_device *device, | |||
1311 | */ | 1311 | */ |
1312 | cpumask_and(&alloced_mask, &stor_device->alloced_cpus, | 1312 | cpumask_and(&alloced_mask, &stor_device->alloced_cpus, |
1313 | cpumask_of_node(cpu_to_node(q_num))); | 1313 | cpumask_of_node(cpu_to_node(q_num))); |
1314 | for_each_cpu(tgt_cpu, &alloced_mask) { | 1314 | for_each_cpu_wrap(tgt_cpu, &alloced_mask, |
1315 | outgoing_channel->target_cpu + 1) { | ||
1315 | if (tgt_cpu != outgoing_channel->target_cpu) { | 1316 | if (tgt_cpu != outgoing_channel->target_cpu) { |
1316 | outgoing_channel = | 1317 | outgoing_channel = |
1317 | stor_device->stor_chns[tgt_cpu]; | 1318 | stor_device->stor_chns[tgt_cpu]; |
diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c index cfb42f5eccb2..750f93197411 100644 --- a/drivers/soc/imx/gpc.c +++ b/drivers/soc/imx/gpc.c | |||
@@ -470,13 +470,21 @@ static int imx_gpc_probe(struct platform_device *pdev) | |||
470 | 470 | ||
471 | static int imx_gpc_remove(struct platform_device *pdev) | 471 | static int imx_gpc_remove(struct platform_device *pdev) |
472 | { | 472 | { |
473 | struct device_node *pgc_node; | ||
473 | int ret; | 474 | int ret; |
474 | 475 | ||
476 | pgc_node = of_get_child_by_name(pdev->dev.of_node, "pgc"); | ||
477 | |||
478 | /* bail out if DT too old and doesn't provide the necessary info */ | ||
479 | if (!of_property_read_bool(pdev->dev.of_node, "#power-domain-cells") && | ||
480 | !pgc_node) | ||
481 | return 0; | ||
482 | |||
475 | /* | 483 | /* |
476 | * If the old DT binding is used the toplevel driver needs to | 484 | * If the old DT binding is used the toplevel driver needs to |
477 | * de-register the power domains | 485 | * de-register the power domains |
478 | */ | 486 | */ |
479 | if (!of_get_child_by_name(pdev->dev.of_node, "pgc")) { | 487 | if (!pgc_node) { |
480 | of_genpd_del_provider(pdev->dev.of_node); | 488 | of_genpd_del_provider(pdev->dev.of_node); |
481 | 489 | ||
482 | ret = pm_genpd_remove(&imx_gpc_domains[GPC_PGC_DOMAIN_PU].base); | 490 | ret = pm_genpd_remove(&imx_gpc_domains[GPC_PGC_DOMAIN_PU].base); |
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c index 6dbba5aff191..86580b6df33d 100644 --- a/drivers/staging/android/ashmem.c +++ b/drivers/staging/android/ashmem.c | |||
@@ -326,24 +326,23 @@ static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin) | |||
326 | mutex_lock(&ashmem_mutex); | 326 | mutex_lock(&ashmem_mutex); |
327 | 327 | ||
328 | if (asma->size == 0) { | 328 | if (asma->size == 0) { |
329 | ret = -EINVAL; | 329 | mutex_unlock(&ashmem_mutex); |
330 | goto out; | 330 | return -EINVAL; |
331 | } | 331 | } |
332 | 332 | ||
333 | if (!asma->file) { | 333 | if (!asma->file) { |
334 | ret = -EBADF; | 334 | mutex_unlock(&ashmem_mutex); |
335 | goto out; | 335 | return -EBADF; |
336 | } | 336 | } |
337 | 337 | ||
338 | mutex_unlock(&ashmem_mutex); | ||
339 | |||
338 | ret = vfs_llseek(asma->file, offset, origin); | 340 | ret = vfs_llseek(asma->file, offset, origin); |
339 | if (ret < 0) | 341 | if (ret < 0) |
340 | goto out; | 342 | return ret; |
341 | 343 | ||
342 | /** Copy f_pos from backing file, since f_ops->llseek() sets it */ | 344 | /** Copy f_pos from backing file, since f_ops->llseek() sets it */ |
343 | file->f_pos = asma->file->f_pos; | 345 | file->f_pos = asma->file->f_pos; |
344 | |||
345 | out: | ||
346 | mutex_unlock(&ashmem_mutex); | ||
347 | return ret; | 346 | return ret; |
348 | } | 347 | } |
349 | 348 | ||
@@ -702,16 +701,14 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd, | |||
702 | size_t pgstart, pgend; | 701 | size_t pgstart, pgend; |
703 | int ret = -EINVAL; | 702 | int ret = -EINVAL; |
704 | 703 | ||
704 | if (unlikely(copy_from_user(&pin, p, sizeof(pin)))) | ||
705 | return -EFAULT; | ||
706 | |||
705 | mutex_lock(&ashmem_mutex); | 707 | mutex_lock(&ashmem_mutex); |
706 | 708 | ||
707 | if (unlikely(!asma->file)) | 709 | if (unlikely(!asma->file)) |
708 | goto out_unlock; | 710 | goto out_unlock; |
709 | 711 | ||
710 | if (unlikely(copy_from_user(&pin, p, sizeof(pin)))) { | ||
711 | ret = -EFAULT; | ||
712 | goto out_unlock; | ||
713 | } | ||
714 | |||
715 | /* per custom, you can pass zero for len to mean "everything onward" */ | 712 | /* per custom, you can pass zero for len to mean "everything onward" */ |
716 | if (!pin.len) | 713 | if (!pin.len) |
717 | pin.len = PAGE_ALIGN(asma->size) - pin.offset; | 714 | pin.len = PAGE_ALIGN(asma->size) - pin.offset; |
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c index e618a87521a3..9d733471ca2e 100644 --- a/drivers/staging/comedi/drivers.c +++ b/drivers/staging/comedi/drivers.c | |||
@@ -475,8 +475,7 @@ unsigned int comedi_nsamples_left(struct comedi_subdevice *s, | |||
475 | struct comedi_cmd *cmd = &async->cmd; | 475 | struct comedi_cmd *cmd = &async->cmd; |
476 | 476 | ||
477 | if (cmd->stop_src == TRIG_COUNT) { | 477 | if (cmd->stop_src == TRIG_COUNT) { |
478 | unsigned int nscans = nsamples / cmd->scan_end_arg; | 478 | unsigned int scans_left = __comedi_nscans_left(s, cmd->stop_arg); |
479 | unsigned int scans_left = __comedi_nscans_left(s, nscans); | ||
480 | unsigned int scan_pos = | 479 | unsigned int scan_pos = |
481 | comedi_bytes_to_samples(s, async->scan_progress); | 480 | comedi_bytes_to_samples(s, async->scan_progress); |
482 | unsigned long long samples_left = 0; | 481 | unsigned long long samples_left = 0; |
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index 5c0e59e8fe46..cbe98bc2b998 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c | |||
@@ -2180,6 +2180,12 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, | |||
2180 | } | 2180 | } |
2181 | if (tty_hung_up_p(file)) | 2181 | if (tty_hung_up_p(file)) |
2182 | break; | 2182 | break; |
2183 | /* | ||
2184 | * Abort readers for ttys which never actually | ||
2185 | * get hung up. See __tty_hangup(). | ||
2186 | */ | ||
2187 | if (test_bit(TTY_HUPPING, &tty->flags)) | ||
2188 | break; | ||
2183 | if (!timeout) | 2189 | if (!timeout) |
2184 | break; | 2190 | break; |
2185 | if (file->f_flags & O_NONBLOCK) { | 2191 | if (file->f_flags & O_NONBLOCK) { |
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 881730cd48c1..3296a05cda2d 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c | |||
@@ -3384,11 +3384,9 @@ static int serial_pci_is_class_communication(struct pci_dev *dev) | |||
3384 | /* | 3384 | /* |
3385 | * If it is not a communications device or the programming | 3385 | * If it is not a communications device or the programming |
3386 | * interface is greater than 6, give up. | 3386 | * interface is greater than 6, give up. |
3387 | * | ||
3388 | * (Should we try to make guesses for multiport serial devices | ||
3389 | * later?) | ||
3390 | */ | 3387 | */ |
3391 | if ((((dev->class >> 8) != PCI_CLASS_COMMUNICATION_SERIAL) && | 3388 | if ((((dev->class >> 8) != PCI_CLASS_COMMUNICATION_SERIAL) && |
3389 | ((dev->class >> 8) != PCI_CLASS_COMMUNICATION_MULTISERIAL) && | ||
3392 | ((dev->class >> 8) != PCI_CLASS_COMMUNICATION_MODEM)) || | 3390 | ((dev->class >> 8) != PCI_CLASS_COMMUNICATION_MODEM)) || |
3393 | (dev->class & 0xff) > 6) | 3391 | (dev->class & 0xff) > 6) |
3394 | return -ENODEV; | 3392 | return -ENODEV; |
@@ -3425,6 +3423,12 @@ serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board) | |||
3425 | { | 3423 | { |
3426 | int num_iomem, num_port, first_port = -1, i; | 3424 | int num_iomem, num_port, first_port = -1, i; |
3427 | 3425 | ||
3426 | /* | ||
3427 | * Should we try to make guesses for multiport serial devices later? | ||
3428 | */ | ||
3429 | if ((dev->class >> 8) == PCI_CLASS_COMMUNICATION_MULTISERIAL) | ||
3430 | return -ENODEV; | ||
3431 | |||
3428 | num_iomem = num_port = 0; | 3432 | num_iomem = num_port = 0; |
3429 | for (i = 0; i < PCI_NUM_BAR_RESOURCES; i++) { | 3433 | for (i = 0; i < PCI_NUM_BAR_RESOURCES; i++) { |
3430 | if (pci_resource_flags(dev, i) & IORESOURCE_IO) { | 3434 | if (pci_resource_flags(dev, i) & IORESOURCE_IO) { |
@@ -4696,6 +4700,17 @@ static const struct pci_device_id serial_pci_tbl[] = { | |||
4696 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */ | 4700 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */ |
4697 | pbn_b2_4_115200 }, | 4701 | pbn_b2_4_115200 }, |
4698 | /* | 4702 | /* |
4703 | * BrainBoxes UC-260 | ||
4704 | */ | ||
4705 | { PCI_VENDOR_ID_INTASHIELD, 0x0D21, | ||
4706 | PCI_ANY_ID, PCI_ANY_ID, | ||
4707 | PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00, | ||
4708 | pbn_b2_4_115200 }, | ||
4709 | { PCI_VENDOR_ID_INTASHIELD, 0x0E34, | ||
4710 | PCI_ANY_ID, PCI_ANY_ID, | ||
4711 | PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00, | ||
4712 | pbn_b2_4_115200 }, | ||
4713 | /* | ||
4699 | * Perle PCI-RAS cards | 4714 | * Perle PCI-RAS cards |
4700 | */ | 4715 | */ |
4701 | { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, | 4716 | { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, |
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index df46a9e88c34..e287fe8f10fc 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c | |||
@@ -1734,6 +1734,7 @@ static void atmel_get_ip_name(struct uart_port *port) | |||
1734 | switch (version) { | 1734 | switch (version) { |
1735 | case 0x302: | 1735 | case 0x302: |
1736 | case 0x10213: | 1736 | case 0x10213: |
1737 | case 0x10302: | ||
1737 | dev_dbg(port->dev, "This version is usart\n"); | 1738 | dev_dbg(port->dev, "This version is usart\n"); |
1738 | atmel_port->has_frac_baudrate = true; | 1739 | atmel_port->has_frac_baudrate = true; |
1739 | atmel_port->has_hw_timer = true; | 1740 | atmel_port->has_hw_timer = true; |
diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c index 870e84fb6e39..a24278380fec 100644 --- a/drivers/tty/serial/earlycon.c +++ b/drivers/tty/serial/earlycon.c | |||
@@ -245,11 +245,12 @@ int __init of_setup_earlycon(const struct earlycon_id *match, | |||
245 | } | 245 | } |
246 | port->mapbase = addr; | 246 | port->mapbase = addr; |
247 | port->uartclk = BASE_BAUD * 16; | 247 | port->uartclk = BASE_BAUD * 16; |
248 | port->membase = earlycon_map(port->mapbase, SZ_4K); | ||
249 | 248 | ||
250 | val = of_get_flat_dt_prop(node, "reg-offset", NULL); | 249 | val = of_get_flat_dt_prop(node, "reg-offset", NULL); |
251 | if (val) | 250 | if (val) |
252 | port->mapbase += be32_to_cpu(*val); | 251 | port->mapbase += be32_to_cpu(*val); |
252 | port->membase = earlycon_map(port->mapbase, SZ_4K); | ||
253 | |||
253 | val = of_get_flat_dt_prop(node, "reg-shift", NULL); | 254 | val = of_get_flat_dt_prop(node, "reg-shift", NULL); |
254 | if (val) | 255 | if (val) |
255 | port->regshift = be32_to_cpu(*val); | 256 | port->regshift = be32_to_cpu(*val); |
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 552fd050f2bb..91f3a1a5cb7f 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c | |||
@@ -2262,7 +2262,7 @@ static int imx_uart_probe(struct platform_device *pdev) | |||
2262 | uart_get_rs485_mode(&pdev->dev, &sport->port.rs485); | 2262 | uart_get_rs485_mode(&pdev->dev, &sport->port.rs485); |
2263 | 2263 | ||
2264 | if (sport->port.rs485.flags & SER_RS485_ENABLED && | 2264 | if (sport->port.rs485.flags & SER_RS485_ENABLED && |
2265 | (!sport->have_rtscts || !sport->have_rtsgpio)) | 2265 | (!sport->have_rtscts && !sport->have_rtsgpio)) |
2266 | dev_err(&pdev->dev, "no RTS control, disabling rs485\n"); | 2266 | dev_err(&pdev->dev, "no RTS control, disabling rs485\n"); |
2267 | 2267 | ||
2268 | imx_uart_rs485_config(&sport->port, &sport->port.rs485); | 2268 | imx_uart_rs485_config(&sport->port, &sport->port.rs485); |
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index c8dde56b532b..35b9201db3b4 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c | |||
@@ -1144,6 +1144,8 @@ static int uart_do_autoconfig(struct tty_struct *tty,struct uart_state *state) | |||
1144 | uport->ops->config_port(uport, flags); | 1144 | uport->ops->config_port(uport, flags); |
1145 | 1145 | ||
1146 | ret = uart_startup(tty, state, 1); | 1146 | ret = uart_startup(tty, state, 1); |
1147 | if (ret == 0) | ||
1148 | tty_port_set_initialized(port, true); | ||
1147 | if (ret > 0) | 1149 | if (ret > 0) |
1148 | ret = 0; | 1150 | ret = 0; |
1149 | } | 1151 | } |
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 0ec2d938011d..fdbbff547106 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c | |||
@@ -886,6 +886,8 @@ static void sci_receive_chars(struct uart_port *port) | |||
886 | /* Tell the rest of the system the news. New characters! */ | 886 | /* Tell the rest of the system the news. New characters! */ |
887 | tty_flip_buffer_push(tport); | 887 | tty_flip_buffer_push(tport); |
888 | } else { | 888 | } else { |
889 | /* TTY buffers full; read from RX reg to prevent lockup */ | ||
890 | serial_port_in(port, SCxRDR); | ||
889 | serial_port_in(port, SCxSR); /* dummy read */ | 891 | serial_port_in(port, SCxSR); /* dummy read */ |
890 | sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port)); | 892 | sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port)); |
891 | } | 893 | } |
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index eb9133b472f4..63114ea35ec1 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c | |||
@@ -586,6 +586,14 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session) | |||
586 | return; | 586 | return; |
587 | } | 587 | } |
588 | 588 | ||
589 | /* | ||
590 | * Some console devices aren't actually hung up for technical and | ||
591 | * historical reasons, which can lead to indefinite interruptible | ||
592 | * sleep in n_tty_read(). The following explicitly tells | ||
593 | * n_tty_read() to abort readers. | ||
594 | */ | ||
595 | set_bit(TTY_HUPPING, &tty->flags); | ||
596 | |||
589 | /* inuse_filps is protected by the single tty lock, | 597 | /* inuse_filps is protected by the single tty lock, |
590 | this really needs to change if we want to flush the | 598 | this really needs to change if we want to flush the |
591 | workqueue with the lock held */ | 599 | workqueue with the lock held */ |
@@ -640,6 +648,7 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session) | |||
640 | * from the ldisc side, which is now guaranteed. | 648 | * from the ldisc side, which is now guaranteed. |
641 | */ | 649 | */ |
642 | set_bit(TTY_HUPPED, &tty->flags); | 650 | set_bit(TTY_HUPPED, &tty->flags); |
651 | clear_bit(TTY_HUPPING, &tty->flags); | ||
643 | tty_unlock(tty); | 652 | tty_unlock(tty); |
644 | 653 | ||
645 | if (f) | 654 | if (f) |
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index c64cf6c4a83d..0c11d40a12bc 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c | |||
@@ -151,6 +151,10 @@ int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request, | |||
151 | 151 | ||
152 | ret = usb_internal_control_msg(dev, pipe, dr, data, size, timeout); | 152 | ret = usb_internal_control_msg(dev, pipe, dr, data, size, timeout); |
153 | 153 | ||
154 | /* Linger a bit, prior to the next control message. */ | ||
155 | if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG) | ||
156 | msleep(200); | ||
157 | |||
154 | kfree(dr); | 158 | kfree(dr); |
155 | 159 | ||
156 | return ret; | 160 | return ret; |
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index f4a548471f0f..54b019e267c5 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c | |||
@@ -230,7 +230,8 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
230 | { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT }, | 230 | { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT }, |
231 | 231 | ||
232 | /* Corsair Strafe RGB */ | 232 | /* Corsair Strafe RGB */ |
233 | { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT }, | 233 | { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT | |
234 | USB_QUIRK_DELAY_CTRL_MSG }, | ||
234 | 235 | ||
235 | /* Corsair K70 LUX */ | 236 | /* Corsair K70 LUX */ |
236 | { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT }, | 237 | { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT }, |
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c index 03fd20f0b496..c4a47496d2fb 100644 --- a/drivers/usb/dwc2/params.c +++ b/drivers/usb/dwc2/params.c | |||
@@ -137,7 +137,7 @@ static void dwc2_set_stm32f4x9_fsotg_params(struct dwc2_hsotg *hsotg) | |||
137 | p->activate_stm_fs_transceiver = true; | 137 | p->activate_stm_fs_transceiver = true; |
138 | } | 138 | } |
139 | 139 | ||
140 | static void dwc2_set_stm32f7xx_hsotg_params(struct dwc2_hsotg *hsotg) | 140 | static void dwc2_set_stm32f7_hsotg_params(struct dwc2_hsotg *hsotg) |
141 | { | 141 | { |
142 | struct dwc2_core_params *p = &hsotg->params; | 142 | struct dwc2_core_params *p = &hsotg->params; |
143 | 143 | ||
@@ -164,8 +164,8 @@ const struct of_device_id dwc2_of_match_table[] = { | |||
164 | { .compatible = "st,stm32f4x9-fsotg", | 164 | { .compatible = "st,stm32f4x9-fsotg", |
165 | .data = dwc2_set_stm32f4x9_fsotg_params }, | 165 | .data = dwc2_set_stm32f4x9_fsotg_params }, |
166 | { .compatible = "st,stm32f4x9-hsotg" }, | 166 | { .compatible = "st,stm32f4x9-hsotg" }, |
167 | { .compatible = "st,stm32f7xx-hsotg", | 167 | { .compatible = "st,stm32f7-hsotg", |
168 | .data = dwc2_set_stm32f7xx_hsotg_params }, | 168 | .data = dwc2_set_stm32f7_hsotg_params }, |
169 | {}, | 169 | {}, |
170 | }; | 170 | }; |
171 | MODULE_DEVICE_TABLE(of, dwc2_of_match_table); | 171 | MODULE_DEVICE_TABLE(of, dwc2_of_match_table); |
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index f1d838a4acd6..e94bf91cc58a 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c | |||
@@ -175,7 +175,7 @@ void dwc3_set_mode(struct dwc3 *dwc, u32 mode) | |||
175 | dwc->desired_dr_role = mode; | 175 | dwc->desired_dr_role = mode; |
176 | spin_unlock_irqrestore(&dwc->lock, flags); | 176 | spin_unlock_irqrestore(&dwc->lock, flags); |
177 | 177 | ||
178 | queue_work(system_power_efficient_wq, &dwc->drd_work); | 178 | queue_work(system_freezable_wq, &dwc->drd_work); |
179 | } | 179 | } |
180 | 180 | ||
181 | u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type) | 181 | u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type) |
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index c2592d883f67..d2428a9e8900 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c | |||
@@ -1538,7 +1538,6 @@ ffs_fs_kill_sb(struct super_block *sb) | |||
1538 | if (sb->s_fs_info) { | 1538 | if (sb->s_fs_info) { |
1539 | ffs_release_dev(sb->s_fs_info); | 1539 | ffs_release_dev(sb->s_fs_info); |
1540 | ffs_data_closed(sb->s_fs_info); | 1540 | ffs_data_closed(sb->s_fs_info); |
1541 | ffs_data_put(sb->s_fs_info); | ||
1542 | } | 1541 | } |
1543 | } | 1542 | } |
1544 | 1543 | ||
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 84f88fa411cd..d088c340e4d0 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c | |||
@@ -447,7 +447,8 @@ static int ohci_init (struct ohci_hcd *ohci) | |||
447 | struct usb_hcd *hcd = ohci_to_hcd(ohci); | 447 | struct usb_hcd *hcd = ohci_to_hcd(ohci); |
448 | 448 | ||
449 | /* Accept arbitrarily long scatter-gather lists */ | 449 | /* Accept arbitrarily long scatter-gather lists */ |
450 | hcd->self.sg_tablesize = ~0; | 450 | if (!(hcd->driver->flags & HCD_LOCAL_MEM)) |
451 | hcd->self.sg_tablesize = ~0; | ||
451 | 452 | ||
452 | if (distrust_firmware) | 453 | if (distrust_firmware) |
453 | ohci->flags |= OHCI_QUIRK_HUB_POWER; | 454 | ohci->flags |= OHCI_QUIRK_HUB_POWER; |
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c index a1ab8acf39ba..c359bae7b754 100644 --- a/drivers/usb/host/xhci-dbgcap.c +++ b/drivers/usb/host/xhci-dbgcap.c | |||
@@ -328,13 +328,14 @@ dbc_ep_do_queue(struct dbc_ep *dep, struct dbc_request *req) | |||
328 | int dbc_ep_queue(struct dbc_ep *dep, struct dbc_request *req, | 328 | int dbc_ep_queue(struct dbc_ep *dep, struct dbc_request *req, |
329 | gfp_t gfp_flags) | 329 | gfp_t gfp_flags) |
330 | { | 330 | { |
331 | unsigned long flags; | ||
331 | struct xhci_dbc *dbc = dep->dbc; | 332 | struct xhci_dbc *dbc = dep->dbc; |
332 | int ret = -ESHUTDOWN; | 333 | int ret = -ESHUTDOWN; |
333 | 334 | ||
334 | spin_lock(&dbc->lock); | 335 | spin_lock_irqsave(&dbc->lock, flags); |
335 | if (dbc->state == DS_CONFIGURED) | 336 | if (dbc->state == DS_CONFIGURED) |
336 | ret = dbc_ep_do_queue(dep, req); | 337 | ret = dbc_ep_do_queue(dep, req); |
337 | spin_unlock(&dbc->lock); | 338 | spin_unlock_irqrestore(&dbc->lock, flags); |
338 | 339 | ||
339 | mod_delayed_work(system_wq, &dbc->event_work, 0); | 340 | mod_delayed_work(system_wq, &dbc->event_work, 0); |
340 | 341 | ||
@@ -521,15 +522,16 @@ static void xhci_do_dbc_stop(struct xhci_hcd *xhci) | |||
521 | static int xhci_dbc_start(struct xhci_hcd *xhci) | 522 | static int xhci_dbc_start(struct xhci_hcd *xhci) |
522 | { | 523 | { |
523 | int ret; | 524 | int ret; |
525 | unsigned long flags; | ||
524 | struct xhci_dbc *dbc = xhci->dbc; | 526 | struct xhci_dbc *dbc = xhci->dbc; |
525 | 527 | ||
526 | WARN_ON(!dbc); | 528 | WARN_ON(!dbc); |
527 | 529 | ||
528 | pm_runtime_get_sync(xhci_to_hcd(xhci)->self.controller); | 530 | pm_runtime_get_sync(xhci_to_hcd(xhci)->self.controller); |
529 | 531 | ||
530 | spin_lock(&dbc->lock); | 532 | spin_lock_irqsave(&dbc->lock, flags); |
531 | ret = xhci_do_dbc_start(xhci); | 533 | ret = xhci_do_dbc_start(xhci); |
532 | spin_unlock(&dbc->lock); | 534 | spin_unlock_irqrestore(&dbc->lock, flags); |
533 | 535 | ||
534 | if (ret) { | 536 | if (ret) { |
535 | pm_runtime_put(xhci_to_hcd(xhci)->self.controller); | 537 | pm_runtime_put(xhci_to_hcd(xhci)->self.controller); |
@@ -541,6 +543,7 @@ static int xhci_dbc_start(struct xhci_hcd *xhci) | |||
541 | 543 | ||
542 | static void xhci_dbc_stop(struct xhci_hcd *xhci) | 544 | static void xhci_dbc_stop(struct xhci_hcd *xhci) |
543 | { | 545 | { |
546 | unsigned long flags; | ||
544 | struct xhci_dbc *dbc = xhci->dbc; | 547 | struct xhci_dbc *dbc = xhci->dbc; |
545 | struct dbc_port *port = &dbc->port; | 548 | struct dbc_port *port = &dbc->port; |
546 | 549 | ||
@@ -551,9 +554,9 @@ static void xhci_dbc_stop(struct xhci_hcd *xhci) | |||
551 | if (port->registered) | 554 | if (port->registered) |
552 | xhci_dbc_tty_unregister_device(xhci); | 555 | xhci_dbc_tty_unregister_device(xhci); |
553 | 556 | ||
554 | spin_lock(&dbc->lock); | 557 | spin_lock_irqsave(&dbc->lock, flags); |
555 | xhci_do_dbc_stop(xhci); | 558 | xhci_do_dbc_stop(xhci); |
556 | spin_unlock(&dbc->lock); | 559 | spin_unlock_irqrestore(&dbc->lock, flags); |
557 | 560 | ||
558 | pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller); | 561 | pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller); |
559 | } | 562 | } |
@@ -779,14 +782,15 @@ static void xhci_dbc_handle_events(struct work_struct *work) | |||
779 | int ret; | 782 | int ret; |
780 | enum evtreturn evtr; | 783 | enum evtreturn evtr; |
781 | struct xhci_dbc *dbc; | 784 | struct xhci_dbc *dbc; |
785 | unsigned long flags; | ||
782 | struct xhci_hcd *xhci; | 786 | struct xhci_hcd *xhci; |
783 | 787 | ||
784 | dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work); | 788 | dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work); |
785 | xhci = dbc->xhci; | 789 | xhci = dbc->xhci; |
786 | 790 | ||
787 | spin_lock(&dbc->lock); | 791 | spin_lock_irqsave(&dbc->lock, flags); |
788 | evtr = xhci_dbc_do_handle_events(dbc); | 792 | evtr = xhci_dbc_do_handle_events(dbc); |
789 | spin_unlock(&dbc->lock); | 793 | spin_unlock_irqrestore(&dbc->lock, flags); |
790 | 794 | ||
791 | switch (evtr) { | 795 | switch (evtr) { |
792 | case EVT_GSER: | 796 | case EVT_GSER: |
diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c index 8d47b6fbf973..75f0b92694ba 100644 --- a/drivers/usb/host/xhci-dbgtty.c +++ b/drivers/usb/host/xhci-dbgtty.c | |||
@@ -92,21 +92,23 @@ static void dbc_start_rx(struct dbc_port *port) | |||
92 | static void | 92 | static void |
93 | dbc_read_complete(struct xhci_hcd *xhci, struct dbc_request *req) | 93 | dbc_read_complete(struct xhci_hcd *xhci, struct dbc_request *req) |
94 | { | 94 | { |
95 | unsigned long flags; | ||
95 | struct xhci_dbc *dbc = xhci->dbc; | 96 | struct xhci_dbc *dbc = xhci->dbc; |
96 | struct dbc_port *port = &dbc->port; | 97 | struct dbc_port *port = &dbc->port; |
97 | 98 | ||
98 | spin_lock(&port->port_lock); | 99 | spin_lock_irqsave(&port->port_lock, flags); |
99 | list_add_tail(&req->list_pool, &port->read_queue); | 100 | list_add_tail(&req->list_pool, &port->read_queue); |
100 | tasklet_schedule(&port->push); | 101 | tasklet_schedule(&port->push); |
101 | spin_unlock(&port->port_lock); | 102 | spin_unlock_irqrestore(&port->port_lock, flags); |
102 | } | 103 | } |
103 | 104 | ||
104 | static void dbc_write_complete(struct xhci_hcd *xhci, struct dbc_request *req) | 105 | static void dbc_write_complete(struct xhci_hcd *xhci, struct dbc_request *req) |
105 | { | 106 | { |
107 | unsigned long flags; | ||
106 | struct xhci_dbc *dbc = xhci->dbc; | 108 | struct xhci_dbc *dbc = xhci->dbc; |
107 | struct dbc_port *port = &dbc->port; | 109 | struct dbc_port *port = &dbc->port; |
108 | 110 | ||
109 | spin_lock(&port->port_lock); | 111 | spin_lock_irqsave(&port->port_lock, flags); |
110 | list_add(&req->list_pool, &port->write_pool); | 112 | list_add(&req->list_pool, &port->write_pool); |
111 | switch (req->status) { | 113 | switch (req->status) { |
112 | case 0: | 114 | case 0: |
@@ -119,7 +121,7 @@ static void dbc_write_complete(struct xhci_hcd *xhci, struct dbc_request *req) | |||
119 | req->status); | 121 | req->status); |
120 | break; | 122 | break; |
121 | } | 123 | } |
122 | spin_unlock(&port->port_lock); | 124 | spin_unlock_irqrestore(&port->port_lock, flags); |
123 | } | 125 | } |
124 | 126 | ||
125 | static void xhci_dbc_free_req(struct dbc_ep *dep, struct dbc_request *req) | 127 | static void xhci_dbc_free_req(struct dbc_ep *dep, struct dbc_request *req) |
@@ -327,12 +329,13 @@ static void dbc_rx_push(unsigned long _port) | |||
327 | { | 329 | { |
328 | struct dbc_request *req; | 330 | struct dbc_request *req; |
329 | struct tty_struct *tty; | 331 | struct tty_struct *tty; |
332 | unsigned long flags; | ||
330 | bool do_push = false; | 333 | bool do_push = false; |
331 | bool disconnect = false; | 334 | bool disconnect = false; |
332 | struct dbc_port *port = (void *)_port; | 335 | struct dbc_port *port = (void *)_port; |
333 | struct list_head *queue = &port->read_queue; | 336 | struct list_head *queue = &port->read_queue; |
334 | 337 | ||
335 | spin_lock_irq(&port->port_lock); | 338 | spin_lock_irqsave(&port->port_lock, flags); |
336 | tty = port->port.tty; | 339 | tty = port->port.tty; |
337 | while (!list_empty(queue)) { | 340 | while (!list_empty(queue)) { |
338 | req = list_first_entry(queue, struct dbc_request, list_pool); | 341 | req = list_first_entry(queue, struct dbc_request, list_pool); |
@@ -392,16 +395,17 @@ static void dbc_rx_push(unsigned long _port) | |||
392 | if (!disconnect) | 395 | if (!disconnect) |
393 | dbc_start_rx(port); | 396 | dbc_start_rx(port); |
394 | 397 | ||
395 | spin_unlock_irq(&port->port_lock); | 398 | spin_unlock_irqrestore(&port->port_lock, flags); |
396 | } | 399 | } |
397 | 400 | ||
398 | static int dbc_port_activate(struct tty_port *_port, struct tty_struct *tty) | 401 | static int dbc_port_activate(struct tty_port *_port, struct tty_struct *tty) |
399 | { | 402 | { |
403 | unsigned long flags; | ||
400 | struct dbc_port *port = container_of(_port, struct dbc_port, port); | 404 | struct dbc_port *port = container_of(_port, struct dbc_port, port); |
401 | 405 | ||
402 | spin_lock_irq(&port->port_lock); | 406 | spin_lock_irqsave(&port->port_lock, flags); |
403 | dbc_start_rx(port); | 407 | dbc_start_rx(port); |
404 | spin_unlock_irq(&port->port_lock); | 408 | spin_unlock_irqrestore(&port->port_lock, flags); |
405 | 409 | ||
406 | return 0; | 410 | return 0; |
407 | } | 411 | } |
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 5262fa571a5d..d9f831b67e57 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c | |||
@@ -126,6 +126,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) | |||
126 | if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info()) | 126 | if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info()) |
127 | xhci->quirks |= XHCI_AMD_PLL_FIX; | 127 | xhci->quirks |= XHCI_AMD_PLL_FIX; |
128 | 128 | ||
129 | if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x43bb) | ||
130 | xhci->quirks |= XHCI_SUSPEND_DELAY; | ||
131 | |||
129 | if (pdev->vendor == PCI_VENDOR_ID_AMD) | 132 | if (pdev->vendor == PCI_VENDOR_ID_AMD) |
130 | xhci->quirks |= XHCI_TRUST_TX_LENGTH; | 133 | xhci->quirks |= XHCI_TRUST_TX_LENGTH; |
131 | 134 | ||
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 6f038306c14d..6652e2d5bd2e 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c | |||
@@ -360,7 +360,6 @@ static int __maybe_unused xhci_plat_suspend(struct device *dev) | |||
360 | { | 360 | { |
361 | struct usb_hcd *hcd = dev_get_drvdata(dev); | 361 | struct usb_hcd *hcd = dev_get_drvdata(dev); |
362 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | 362 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
363 | int ret; | ||
364 | 363 | ||
365 | /* | 364 | /* |
366 | * xhci_suspend() needs `do_wakeup` to know whether host is allowed | 365 | * xhci_suspend() needs `do_wakeup` to know whether host is allowed |
@@ -370,12 +369,7 @@ static int __maybe_unused xhci_plat_suspend(struct device *dev) | |||
370 | * reconsider this when xhci_plat_suspend enlarges its scope, e.g., | 369 | * reconsider this when xhci_plat_suspend enlarges its scope, e.g., |
371 | * also applies to runtime suspend. | 370 | * also applies to runtime suspend. |
372 | */ | 371 | */ |
373 | ret = xhci_suspend(xhci, device_may_wakeup(dev)); | 372 | return xhci_suspend(xhci, device_may_wakeup(dev)); |
374 | |||
375 | if (!device_may_wakeup(dev) && !IS_ERR(xhci->clk)) | ||
376 | clk_disable_unprepare(xhci->clk); | ||
377 | |||
378 | return ret; | ||
379 | } | 373 | } |
380 | 374 | ||
381 | static int __maybe_unused xhci_plat_resume(struct device *dev) | 375 | static int __maybe_unused xhci_plat_resume(struct device *dev) |
@@ -384,9 +378,6 @@ static int __maybe_unused xhci_plat_resume(struct device *dev) | |||
384 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | 378 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
385 | int ret; | 379 | int ret; |
386 | 380 | ||
387 | if (!device_may_wakeup(dev) && !IS_ERR(xhci->clk)) | ||
388 | clk_prepare_enable(xhci->clk); | ||
389 | |||
390 | ret = xhci_priv_resume_quirk(hcd); | 381 | ret = xhci_priv_resume_quirk(hcd); |
391 | if (ret) | 382 | if (ret) |
392 | return ret; | 383 | return ret; |
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c index f0b559660007..f33ffc2bc4ed 100644 --- a/drivers/usb/host/xhci-rcar.c +++ b/drivers/usb/host/xhci-rcar.c | |||
@@ -83,6 +83,10 @@ static const struct soc_device_attribute rcar_quirks_match[] = { | |||
83 | .soc_id = "r8a7796", | 83 | .soc_id = "r8a7796", |
84 | .data = (void *)RCAR_XHCI_FIRMWARE_V3, | 84 | .data = (void *)RCAR_XHCI_FIRMWARE_V3, |
85 | }, | 85 | }, |
86 | { | ||
87 | .soc_id = "r8a77965", | ||
88 | .data = (void *)RCAR_XHCI_FIRMWARE_V3, | ||
89 | }, | ||
86 | { /* sentinel */ }, | 90 | { /* sentinel */ }, |
87 | }; | 91 | }; |
88 | 92 | ||
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 25d4b748a56f..5d37700ae4b0 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
@@ -877,6 +877,9 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) | |||
877 | clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); | 877 | clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); |
878 | del_timer_sync(&xhci->shared_hcd->rh_timer); | 878 | del_timer_sync(&xhci->shared_hcd->rh_timer); |
879 | 879 | ||
880 | if (xhci->quirks & XHCI_SUSPEND_DELAY) | ||
881 | usleep_range(1000, 1500); | ||
882 | |||
880 | spin_lock_irq(&xhci->lock); | 883 | spin_lock_irq(&xhci->lock); |
881 | clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); | 884 | clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); |
882 | clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); | 885 | clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); |
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index e4d7d3d06a75..866e141d4972 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h | |||
@@ -718,11 +718,12 @@ struct xhci_ep_ctx { | |||
718 | /* bits 10:14 are Max Primary Streams */ | 718 | /* bits 10:14 are Max Primary Streams */ |
719 | /* bit 15 is Linear Stream Array */ | 719 | /* bit 15 is Linear Stream Array */ |
720 | /* Interval - period between requests to an endpoint - 125u increments. */ | 720 | /* Interval - period between requests to an endpoint - 125u increments. */ |
721 | #define EP_INTERVAL(p) (((p) & 0xff) << 16) | 721 | #define EP_INTERVAL(p) (((p) & 0xff) << 16) |
722 | #define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff)) | 722 | #define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff)) |
723 | #define CTX_TO_EP_INTERVAL(p) (((p) >> 16) & 0xff) | 723 | #define CTX_TO_EP_INTERVAL(p) (((p) >> 16) & 0xff) |
724 | #define EP_MAXPSTREAMS_MASK (0x1f << 10) | 724 | #define EP_MAXPSTREAMS_MASK (0x1f << 10) |
725 | #define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK) | 725 | #define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK) |
726 | #define CTX_TO_EP_MAXPSTREAMS(p) (((p) & EP_MAXPSTREAMS_MASK) >> 10) | ||
726 | /* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */ | 727 | /* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */ |
727 | #define EP_HAS_LSA (1 << 15) | 728 | #define EP_HAS_LSA (1 << 15) |
728 | /* hosts with LEC=1 use bits 31:24 as ESIT high bits. */ | 729 | /* hosts with LEC=1 use bits 31:24 as ESIT high bits. */ |
@@ -1825,6 +1826,7 @@ struct xhci_hcd { | |||
1825 | #define XHCI_U2_DISABLE_WAKE (1 << 27) | 1826 | #define XHCI_U2_DISABLE_WAKE (1 << 27) |
1826 | #define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28) | 1827 | #define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28) |
1827 | #define XHCI_HW_LPM_DISABLE (1 << 29) | 1828 | #define XHCI_HW_LPM_DISABLE (1 << 29) |
1829 | #define XHCI_SUSPEND_DELAY (1 << 30) | ||
1828 | 1830 | ||
1829 | unsigned int num_active_eps; | 1831 | unsigned int num_active_eps; |
1830 | unsigned int limit_active_eps; | 1832 | unsigned int limit_active_eps; |
@@ -2549,21 +2551,22 @@ static inline const char *xhci_decode_ep_context(u32 info, u32 info2, u64 deq, | |||
2549 | u8 burst; | 2551 | u8 burst; |
2550 | u8 cerr; | 2552 | u8 cerr; |
2551 | u8 mult; | 2553 | u8 mult; |
2552 | u8 lsa; | 2554 | |
2553 | u8 hid; | 2555 | bool lsa; |
2556 | bool hid; | ||
2554 | 2557 | ||
2555 | esit = CTX_TO_MAX_ESIT_PAYLOAD_HI(info) << 16 | | 2558 | esit = CTX_TO_MAX_ESIT_PAYLOAD_HI(info) << 16 | |
2556 | CTX_TO_MAX_ESIT_PAYLOAD(tx_info); | 2559 | CTX_TO_MAX_ESIT_PAYLOAD(tx_info); |
2557 | 2560 | ||
2558 | ep_state = info & EP_STATE_MASK; | 2561 | ep_state = info & EP_STATE_MASK; |
2559 | max_pstr = info & EP_MAXPSTREAMS_MASK; | 2562 | max_pstr = CTX_TO_EP_MAXPSTREAMS(info); |
2560 | interval = CTX_TO_EP_INTERVAL(info); | 2563 | interval = CTX_TO_EP_INTERVAL(info); |
2561 | mult = CTX_TO_EP_MULT(info) + 1; | 2564 | mult = CTX_TO_EP_MULT(info) + 1; |
2562 | lsa = info & EP_HAS_LSA; | 2565 | lsa = !!(info & EP_HAS_LSA); |
2563 | 2566 | ||
2564 | cerr = (info2 & (3 << 1)) >> 1; | 2567 | cerr = (info2 & (3 << 1)) >> 1; |
2565 | ep_type = CTX_TO_EP_TYPE(info2); | 2568 | ep_type = CTX_TO_EP_TYPE(info2); |
2566 | hid = info2 & (1 << 7); | 2569 | hid = !!(info2 & (1 << 7)); |
2567 | burst = CTX_TO_MAX_BURST(info2); | 2570 | burst = CTX_TO_MAX_BURST(info2); |
2568 | maxp = MAX_PACKET_DECODED(info2); | 2571 | maxp = MAX_PACKET_DECODED(info2); |
2569 | 2572 | ||
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c index f5e1bb5e5217..984f7e12a6a5 100644 --- a/drivers/usb/mon/mon_text.c +++ b/drivers/usb/mon/mon_text.c | |||
@@ -85,6 +85,8 @@ struct mon_reader_text { | |||
85 | 85 | ||
86 | wait_queue_head_t wait; | 86 | wait_queue_head_t wait; |
87 | int printf_size; | 87 | int printf_size; |
88 | size_t printf_offset; | ||
89 | size_t printf_togo; | ||
88 | char *printf_buf; | 90 | char *printf_buf; |
89 | struct mutex printf_lock; | 91 | struct mutex printf_lock; |
90 | 92 | ||
@@ -376,75 +378,103 @@ err_alloc: | |||
376 | return rc; | 378 | return rc; |
377 | } | 379 | } |
378 | 380 | ||
379 | /* | 381 | static ssize_t mon_text_copy_to_user(struct mon_reader_text *rp, |
380 | * For simplicity, we read one record in one system call and throw out | 382 | char __user * const buf, const size_t nbytes) |
381 | * what does not fit. This means that the following does not work: | 383 | { |
382 | * dd if=/dbg/usbmon/0t bs=10 | 384 | const size_t togo = min(nbytes, rp->printf_togo); |
383 | * Also, we do not allow seeks and do not bother advancing the offset. | 385 | |
384 | */ | 386 | if (copy_to_user(buf, &rp->printf_buf[rp->printf_offset], togo)) |
387 | return -EFAULT; | ||
388 | rp->printf_togo -= togo; | ||
389 | rp->printf_offset += togo; | ||
390 | return togo; | ||
391 | } | ||
392 | |||
393 | /* ppos is not advanced since the llseek operation is not permitted. */ | ||
385 | static ssize_t mon_text_read_t(struct file *file, char __user *buf, | 394 | static ssize_t mon_text_read_t(struct file *file, char __user *buf, |
386 | size_t nbytes, loff_t *ppos) | 395 | size_t nbytes, loff_t *ppos) |
387 | { | 396 | { |
388 | struct mon_reader_text *rp = file->private_data; | 397 | struct mon_reader_text *rp = file->private_data; |
389 | struct mon_event_text *ep; | 398 | struct mon_event_text *ep; |
390 | struct mon_text_ptr ptr; | 399 | struct mon_text_ptr ptr; |
400 | ssize_t ret; | ||
391 | 401 | ||
392 | ep = mon_text_read_wait(rp, file); | ||
393 | if (IS_ERR(ep)) | ||
394 | return PTR_ERR(ep); | ||
395 | mutex_lock(&rp->printf_lock); | 402 | mutex_lock(&rp->printf_lock); |
396 | ptr.cnt = 0; | 403 | |
397 | ptr.pbuf = rp->printf_buf; | 404 | if (rp->printf_togo == 0) { |
398 | ptr.limit = rp->printf_size; | 405 | |
399 | 406 | ep = mon_text_read_wait(rp, file); | |
400 | mon_text_read_head_t(rp, &ptr, ep); | 407 | if (IS_ERR(ep)) { |
401 | mon_text_read_statset(rp, &ptr, ep); | 408 | mutex_unlock(&rp->printf_lock); |
402 | ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, | 409 | return PTR_ERR(ep); |
403 | " %d", ep->length); | 410 | } |
404 | mon_text_read_data(rp, &ptr, ep); | 411 | ptr.cnt = 0; |
405 | 412 | ptr.pbuf = rp->printf_buf; | |
406 | if (copy_to_user(buf, rp->printf_buf, ptr.cnt)) | 413 | ptr.limit = rp->printf_size; |
407 | ptr.cnt = -EFAULT; | 414 | |
415 | mon_text_read_head_t(rp, &ptr, ep); | ||
416 | mon_text_read_statset(rp, &ptr, ep); | ||
417 | ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, | ||
418 | " %d", ep->length); | ||
419 | mon_text_read_data(rp, &ptr, ep); | ||
420 | |||
421 | rp->printf_togo = ptr.cnt; | ||
422 | rp->printf_offset = 0; | ||
423 | |||
424 | kmem_cache_free(rp->e_slab, ep); | ||
425 | } | ||
426 | |||
427 | ret = mon_text_copy_to_user(rp, buf, nbytes); | ||
408 | mutex_unlock(&rp->printf_lock); | 428 | mutex_unlock(&rp->printf_lock); |
409 | kmem_cache_free(rp->e_slab, ep); | 429 | return ret; |
410 | return ptr.cnt; | ||
411 | } | 430 | } |
412 | 431 | ||
432 | /* ppos is not advanced since the llseek operation is not permitted. */ | ||
413 | static ssize_t mon_text_read_u(struct file *file, char __user *buf, | 433 | static ssize_t mon_text_read_u(struct file *file, char __user *buf, |
414 | size_t nbytes, loff_t *ppos) | 434 | size_t nbytes, loff_t *ppos) |
415 | { | 435 | { |
416 | struct mon_reader_text *rp = file->private_data; | 436 | struct mon_reader_text *rp = file->private_data; |
417 | struct mon_event_text *ep; | 437 | struct mon_event_text *ep; |
418 | struct mon_text_ptr ptr; | 438 | struct mon_text_ptr ptr; |
439 | ssize_t ret; | ||
419 | 440 | ||
420 | ep = mon_text_read_wait(rp, file); | ||
421 | if (IS_ERR(ep)) | ||
422 | return PTR_ERR(ep); | ||
423 | mutex_lock(&rp->printf_lock); | 441 | mutex_lock(&rp->printf_lock); |
424 | ptr.cnt = 0; | ||
425 | ptr.pbuf = rp->printf_buf; | ||
426 | ptr.limit = rp->printf_size; | ||
427 | 442 | ||
428 | mon_text_read_head_u(rp, &ptr, ep); | 443 | if (rp->printf_togo == 0) { |
429 | if (ep->type == 'E') { | 444 | |
430 | mon_text_read_statset(rp, &ptr, ep); | 445 | ep = mon_text_read_wait(rp, file); |
431 | } else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) { | 446 | if (IS_ERR(ep)) { |
432 | mon_text_read_isostat(rp, &ptr, ep); | 447 | mutex_unlock(&rp->printf_lock); |
433 | mon_text_read_isodesc(rp, &ptr, ep); | 448 | return PTR_ERR(ep); |
434 | } else if (ep->xfertype == USB_ENDPOINT_XFER_INT) { | 449 | } |
435 | mon_text_read_intstat(rp, &ptr, ep); | 450 | ptr.cnt = 0; |
436 | } else { | 451 | ptr.pbuf = rp->printf_buf; |
437 | mon_text_read_statset(rp, &ptr, ep); | 452 | ptr.limit = rp->printf_size; |
453 | |||
454 | mon_text_read_head_u(rp, &ptr, ep); | ||
455 | if (ep->type == 'E') { | ||
456 | mon_text_read_statset(rp, &ptr, ep); | ||
457 | } else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) { | ||
458 | mon_text_read_isostat(rp, &ptr, ep); | ||
459 | mon_text_read_isodesc(rp, &ptr, ep); | ||
460 | } else if (ep->xfertype == USB_ENDPOINT_XFER_INT) { | ||
461 | mon_text_read_intstat(rp, &ptr, ep); | ||
462 | } else { | ||
463 | mon_text_read_statset(rp, &ptr, ep); | ||
464 | } | ||
465 | ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, | ||
466 | " %d", ep->length); | ||
467 | mon_text_read_data(rp, &ptr, ep); | ||
468 | |||
469 | rp->printf_togo = ptr.cnt; | ||
470 | rp->printf_offset = 0; | ||
471 | |||
472 | kmem_cache_free(rp->e_slab, ep); | ||
438 | } | 473 | } |
439 | ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, | ||
440 | " %d", ep->length); | ||
441 | mon_text_read_data(rp, &ptr, ep); | ||
442 | 474 | ||
443 | if (copy_to_user(buf, rp->printf_buf, ptr.cnt)) | 475 | ret = mon_text_copy_to_user(rp, buf, nbytes); |
444 | ptr.cnt = -EFAULT; | ||
445 | mutex_unlock(&rp->printf_lock); | 476 | mutex_unlock(&rp->printf_lock); |
446 | kmem_cache_free(rp->e_slab, ep); | 477 | return ret; |
447 | return ptr.cnt; | ||
448 | } | 478 | } |
449 | 479 | ||
450 | static struct mon_event_text *mon_text_read_wait(struct mon_reader_text *rp, | 480 | static struct mon_event_text *mon_text_read_wait(struct mon_reader_text *rp, |
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index eef4ad578b31..4d723077be2b 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c | |||
@@ -1756,6 +1756,7 @@ vbus_show(struct device *dev, struct device_attribute *attr, char *buf) | |||
1756 | int vbus; | 1756 | int vbus; |
1757 | u8 devctl; | 1757 | u8 devctl; |
1758 | 1758 | ||
1759 | pm_runtime_get_sync(dev); | ||
1759 | spin_lock_irqsave(&musb->lock, flags); | 1760 | spin_lock_irqsave(&musb->lock, flags); |
1760 | val = musb->a_wait_bcon; | 1761 | val = musb->a_wait_bcon; |
1761 | vbus = musb_platform_get_vbus_status(musb); | 1762 | vbus = musb_platform_get_vbus_status(musb); |
@@ -1769,6 +1770,7 @@ vbus_show(struct device *dev, struct device_attribute *attr, char *buf) | |||
1769 | vbus = 0; | 1770 | vbus = 0; |
1770 | } | 1771 | } |
1771 | spin_unlock_irqrestore(&musb->lock, flags); | 1772 | spin_unlock_irqrestore(&musb->lock, flags); |
1773 | pm_runtime_put_sync(dev); | ||
1772 | 1774 | ||
1773 | return sprintf(buf, "Vbus %s, timeout %lu msec\n", | 1775 | return sprintf(buf, "Vbus %s, timeout %lu msec\n", |
1774 | vbus ? "on" : "off", val); | 1776 | vbus ? "on" : "off", val); |
@@ -2471,11 +2473,11 @@ static int musb_remove(struct platform_device *pdev) | |||
2471 | musb_disable_interrupts(musb); | 2473 | musb_disable_interrupts(musb); |
2472 | musb_writeb(musb->mregs, MUSB_DEVCTL, 0); | 2474 | musb_writeb(musb->mregs, MUSB_DEVCTL, 0); |
2473 | spin_unlock_irqrestore(&musb->lock, flags); | 2475 | spin_unlock_irqrestore(&musb->lock, flags); |
2476 | musb_platform_exit(musb); | ||
2474 | 2477 | ||
2475 | pm_runtime_dont_use_autosuspend(musb->controller); | 2478 | pm_runtime_dont_use_autosuspend(musb->controller); |
2476 | pm_runtime_put_sync(musb->controller); | 2479 | pm_runtime_put_sync(musb->controller); |
2477 | pm_runtime_disable(musb->controller); | 2480 | pm_runtime_disable(musb->controller); |
2478 | musb_platform_exit(musb); | ||
2479 | musb_phy_callback = NULL; | 2481 | musb_phy_callback = NULL; |
2480 | if (musb->dma_controller) | 2482 | if (musb->dma_controller) |
2481 | musb_dma_controller_destroy(musb->dma_controller); | 2483 | musb_dma_controller_destroy(musb->dma_controller); |
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index 3b1b9695177a..6034c39b67d1 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c | |||
@@ -1076,7 +1076,7 @@ static int uas_post_reset(struct usb_interface *intf) | |||
1076 | return 0; | 1076 | return 0; |
1077 | 1077 | ||
1078 | err = uas_configure_endpoints(devinfo); | 1078 | err = uas_configure_endpoints(devinfo); |
1079 | if (err && err != ENODEV) | 1079 | if (err && err != -ENODEV) |
1080 | shost_printk(KERN_ERR, shost, | 1080 | shost_printk(KERN_ERR, shost, |
1081 | "%s: alloc streams error %d after reset", | 1081 | "%s: alloc streams error %d after reset", |
1082 | __func__, err); | 1082 | __func__, err); |
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 264af199aec8..747d3a9596d9 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h | |||
@@ -2118,6 +2118,13 @@ UNUSUAL_DEV( 0x152d, 0x2566, 0x0114, 0x0114, | |||
2118 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | 2118 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
2119 | US_FL_BROKEN_FUA ), | 2119 | US_FL_BROKEN_FUA ), |
2120 | 2120 | ||
2121 | /* Reported by Teijo Kinnunen <teijo.kinnunen@code-q.fi> */ | ||
2122 | UNUSUAL_DEV( 0x152d, 0x2567, 0x0117, 0x0117, | ||
2123 | "JMicron", | ||
2124 | "USB to ATA/ATAPI Bridge", | ||
2125 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | ||
2126 | US_FL_BROKEN_FUA ), | ||
2127 | |||
2121 | /* Reported-by George Cherian <george.cherian@cavium.com> */ | 2128 | /* Reported-by George Cherian <george.cherian@cavium.com> */ |
2122 | UNUSUAL_DEV(0x152d, 0x9561, 0x0000, 0x9999, | 2129 | UNUSUAL_DEV(0x152d, 0x9561, 0x0000, 0x9999, |
2123 | "JMicron", | 2130 | "JMicron", |
diff --git a/drivers/usb/typec/fusb302/fusb302.c b/drivers/usb/typec/fusb302/fusb302.c index 9ce4756adad6..dcd8ef085b30 100644 --- a/drivers/usb/typec/fusb302/fusb302.c +++ b/drivers/usb/typec/fusb302/fusb302.c | |||
@@ -1857,7 +1857,8 @@ static int fusb302_probe(struct i2c_client *client, | |||
1857 | chip->tcpm_port = tcpm_register_port(&client->dev, &chip->tcpc_dev); | 1857 | chip->tcpm_port = tcpm_register_port(&client->dev, &chip->tcpc_dev); |
1858 | if (IS_ERR(chip->tcpm_port)) { | 1858 | if (IS_ERR(chip->tcpm_port)) { |
1859 | ret = PTR_ERR(chip->tcpm_port); | 1859 | ret = PTR_ERR(chip->tcpm_port); |
1860 | dev_err(dev, "cannot register tcpm port, ret=%d", ret); | 1860 | if (ret != -EPROBE_DEFER) |
1861 | dev_err(dev, "cannot register tcpm port, ret=%d", ret); | ||
1861 | goto destroy_workqueue; | 1862 | goto destroy_workqueue; |
1862 | } | 1863 | } |
1863 | 1864 | ||
diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c index f4d563ee7690..8b637a4b474b 100644 --- a/drivers/usb/typec/tcpm.c +++ b/drivers/usb/typec/tcpm.c | |||
@@ -252,9 +252,6 @@ struct tcpm_port { | |||
252 | unsigned int nr_src_pdo; | 252 | unsigned int nr_src_pdo; |
253 | u32 snk_pdo[PDO_MAX_OBJECTS]; | 253 | u32 snk_pdo[PDO_MAX_OBJECTS]; |
254 | unsigned int nr_snk_pdo; | 254 | unsigned int nr_snk_pdo; |
255 | unsigned int nr_fixed; /* number of fixed sink PDOs */ | ||
256 | unsigned int nr_var; /* number of variable sink PDOs */ | ||
257 | unsigned int nr_batt; /* number of battery sink PDOs */ | ||
258 | u32 snk_vdo[VDO_MAX_OBJECTS]; | 255 | u32 snk_vdo[VDO_MAX_OBJECTS]; |
259 | unsigned int nr_snk_vdo; | 256 | unsigned int nr_snk_vdo; |
260 | 257 | ||
@@ -1770,90 +1767,39 @@ static int tcpm_pd_check_request(struct tcpm_port *port) | |||
1770 | return 0; | 1767 | return 0; |
1771 | } | 1768 | } |
1772 | 1769 | ||
1773 | #define min_power(x, y) min(pdo_max_power(x), pdo_max_power(y)) | 1770 | static int tcpm_pd_select_pdo(struct tcpm_port *port) |
1774 | #define min_current(x, y) min(pdo_max_current(x), pdo_max_current(y)) | ||
1775 | |||
1776 | static int tcpm_pd_select_pdo(struct tcpm_port *port, int *sink_pdo, | ||
1777 | int *src_pdo) | ||
1778 | { | 1771 | { |
1779 | unsigned int i, j, max_mw = 0, max_mv = 0, mw = 0, mv = 0, ma = 0; | 1772 | unsigned int i, max_mw = 0, max_mv = 0; |
1780 | int ret = -EINVAL; | 1773 | int ret = -EINVAL; |
1781 | 1774 | ||
1782 | /* | 1775 | /* |
1783 | * Select the source PDO providing the most power which has a | 1776 | * Select the source PDO providing the most power while staying within |
1784 | * matchig sink cap. | 1777 | * the board's voltage limits. Prefer PDO providing exp |
1785 | */ | 1778 | */ |
1786 | for (i = 0; i < port->nr_source_caps; i++) { | 1779 | for (i = 0; i < port->nr_source_caps; i++) { |
1787 | u32 pdo = port->source_caps[i]; | 1780 | u32 pdo = port->source_caps[i]; |
1788 | enum pd_pdo_type type = pdo_type(pdo); | 1781 | enum pd_pdo_type type = pdo_type(pdo); |
1782 | unsigned int mv, ma, mw; | ||
1789 | 1783 | ||
1790 | if (type == PDO_TYPE_FIXED) { | 1784 | if (type == PDO_TYPE_FIXED) |
1791 | for (j = 0; j < port->nr_fixed; j++) { | 1785 | mv = pdo_fixed_voltage(pdo); |
1792 | if (pdo_fixed_voltage(pdo) == | 1786 | else |
1793 | pdo_fixed_voltage(port->snk_pdo[j])) { | 1787 | mv = pdo_min_voltage(pdo); |
1794 | ma = min_current(pdo, port->snk_pdo[j]); | 1788 | |
1795 | mv = pdo_fixed_voltage(pdo); | 1789 | if (type == PDO_TYPE_BATT) { |
1796 | mw = ma * mv / 1000; | 1790 | mw = pdo_max_power(pdo); |
1797 | if (mw > max_mw || | 1791 | } else { |
1798 | (mw == max_mw && mv > max_mv)) { | 1792 | ma = min(pdo_max_current(pdo), |
1799 | ret = 0; | 1793 | port->max_snk_ma); |
1800 | *src_pdo = i; | 1794 | mw = ma * mv / 1000; |
1801 | *sink_pdo = j; | 1795 | } |
1802 | max_mw = mw; | 1796 | |
1803 | max_mv = mv; | 1797 | /* Perfer higher voltages if available */ |
1804 | } | 1798 | if ((mw > max_mw || (mw == max_mw && mv > max_mv)) && |
1805 | /* There could only be one fixed pdo | 1799 | mv <= port->max_snk_mv) { |
1806 | * at a specific voltage level. | 1800 | ret = i; |
1807 | * So breaking here. | 1801 | max_mw = mw; |
1808 | */ | 1802 | max_mv = mv; |
1809 | break; | ||
1810 | } | ||
1811 | } | ||
1812 | } else if (type == PDO_TYPE_BATT) { | ||
1813 | for (j = port->nr_fixed; | ||
1814 | j < port->nr_fixed + | ||
1815 | port->nr_batt; | ||
1816 | j++) { | ||
1817 | if (pdo_min_voltage(pdo) >= | ||
1818 | pdo_min_voltage(port->snk_pdo[j]) && | ||
1819 | pdo_max_voltage(pdo) <= | ||
1820 | pdo_max_voltage(port->snk_pdo[j])) { | ||
1821 | mw = min_power(pdo, port->snk_pdo[j]); | ||
1822 | mv = pdo_min_voltage(pdo); | ||
1823 | if (mw > max_mw || | ||
1824 | (mw == max_mw && mv > max_mv)) { | ||
1825 | ret = 0; | ||
1826 | *src_pdo = i; | ||
1827 | *sink_pdo = j; | ||
1828 | max_mw = mw; | ||
1829 | max_mv = mv; | ||
1830 | } | ||
1831 | } | ||
1832 | } | ||
1833 | } else if (type == PDO_TYPE_VAR) { | ||
1834 | for (j = port->nr_fixed + | ||
1835 | port->nr_batt; | ||
1836 | j < port->nr_fixed + | ||
1837 | port->nr_batt + | ||
1838 | port->nr_var; | ||
1839 | j++) { | ||
1840 | if (pdo_min_voltage(pdo) >= | ||
1841 | pdo_min_voltage(port->snk_pdo[j]) && | ||
1842 | pdo_max_voltage(pdo) <= | ||
1843 | pdo_max_voltage(port->snk_pdo[j])) { | ||
1844 | ma = min_current(pdo, port->snk_pdo[j]); | ||
1845 | mv = pdo_min_voltage(pdo); | ||
1846 | mw = ma * mv / 1000; | ||
1847 | if (mw > max_mw || | ||
1848 | (mw == max_mw && mv > max_mv)) { | ||
1849 | ret = 0; | ||
1850 | *src_pdo = i; | ||
1851 | *sink_pdo = j; | ||
1852 | max_mw = mw; | ||
1853 | max_mv = mv; | ||
1854 | } | ||
1855 | } | ||
1856 | } | ||
1857 | } | 1803 | } |
1858 | } | 1804 | } |
1859 | 1805 | ||
@@ -1865,14 +1811,13 @@ static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo) | |||
1865 | unsigned int mv, ma, mw, flags; | 1811 | unsigned int mv, ma, mw, flags; |
1866 | unsigned int max_ma, max_mw; | 1812 | unsigned int max_ma, max_mw; |
1867 | enum pd_pdo_type type; | 1813 | enum pd_pdo_type type; |
1868 | int src_pdo_index, snk_pdo_index; | 1814 | int index; |
1869 | u32 pdo, matching_snk_pdo; | 1815 | u32 pdo; |
1870 | 1816 | ||
1871 | if (tcpm_pd_select_pdo(port, &snk_pdo_index, &src_pdo_index) < 0) | 1817 | index = tcpm_pd_select_pdo(port); |
1818 | if (index < 0) | ||
1872 | return -EINVAL; | 1819 | return -EINVAL; |
1873 | 1820 | pdo = port->source_caps[index]; | |
1874 | pdo = port->source_caps[src_pdo_index]; | ||
1875 | matching_snk_pdo = port->snk_pdo[snk_pdo_index]; | ||
1876 | type = pdo_type(pdo); | 1821 | type = pdo_type(pdo); |
1877 | 1822 | ||
1878 | if (type == PDO_TYPE_FIXED) | 1823 | if (type == PDO_TYPE_FIXED) |
@@ -1880,28 +1825,26 @@ static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo) | |||
1880 | else | 1825 | else |
1881 | mv = pdo_min_voltage(pdo); | 1826 | mv = pdo_min_voltage(pdo); |
1882 | 1827 | ||
1883 | /* Select maximum available current within the sink pdo's limit */ | 1828 | /* Select maximum available current within the board's power limit */ |
1884 | if (type == PDO_TYPE_BATT) { | 1829 | if (type == PDO_TYPE_BATT) { |
1885 | mw = min_power(pdo, matching_snk_pdo); | 1830 | mw = pdo_max_power(pdo); |
1886 | ma = 1000 * mw / mv; | 1831 | ma = 1000 * min(mw, port->max_snk_mw) / mv; |
1887 | } else { | 1832 | } else { |
1888 | ma = min_current(pdo, matching_snk_pdo); | 1833 | ma = min(pdo_max_current(pdo), |
1889 | mw = ma * mv / 1000; | 1834 | 1000 * port->max_snk_mw / mv); |
1890 | } | 1835 | } |
1836 | ma = min(ma, port->max_snk_ma); | ||
1891 | 1837 | ||
1892 | flags = RDO_USB_COMM | RDO_NO_SUSPEND; | 1838 | flags = RDO_USB_COMM | RDO_NO_SUSPEND; |
1893 | 1839 | ||
1894 | /* Set mismatch bit if offered power is less than operating power */ | 1840 | /* Set mismatch bit if offered power is less than operating power */ |
1841 | mw = ma * mv / 1000; | ||
1895 | max_ma = ma; | 1842 | max_ma = ma; |
1896 | max_mw = mw; | 1843 | max_mw = mw; |
1897 | if (mw < port->operating_snk_mw) { | 1844 | if (mw < port->operating_snk_mw) { |
1898 | flags |= RDO_CAP_MISMATCH; | 1845 | flags |= RDO_CAP_MISMATCH; |
1899 | if (type == PDO_TYPE_BATT && | 1846 | max_mw = port->operating_snk_mw; |
1900 | (pdo_max_power(matching_snk_pdo) > pdo_max_power(pdo))) | 1847 | max_ma = max_mw * 1000 / mv; |
1901 | max_mw = pdo_max_power(matching_snk_pdo); | ||
1902 | else if (pdo_max_current(matching_snk_pdo) > | ||
1903 | pdo_max_current(pdo)) | ||
1904 | max_ma = pdo_max_current(matching_snk_pdo); | ||
1905 | } | 1848 | } |
1906 | 1849 | ||
1907 | tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d", | 1850 | tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d", |
@@ -1910,16 +1853,16 @@ static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo) | |||
1910 | port->polarity); | 1853 | port->polarity); |
1911 | 1854 | ||
1912 | if (type == PDO_TYPE_BATT) { | 1855 | if (type == PDO_TYPE_BATT) { |
1913 | *rdo = RDO_BATT(src_pdo_index + 1, mw, max_mw, flags); | 1856 | *rdo = RDO_BATT(index + 1, mw, max_mw, flags); |
1914 | 1857 | ||
1915 | tcpm_log(port, "Requesting PDO %d: %u mV, %u mW%s", | 1858 | tcpm_log(port, "Requesting PDO %d: %u mV, %u mW%s", |
1916 | src_pdo_index, mv, mw, | 1859 | index, mv, mw, |
1917 | flags & RDO_CAP_MISMATCH ? " [mismatch]" : ""); | 1860 | flags & RDO_CAP_MISMATCH ? " [mismatch]" : ""); |
1918 | } else { | 1861 | } else { |
1919 | *rdo = RDO_FIXED(src_pdo_index + 1, ma, max_ma, flags); | 1862 | *rdo = RDO_FIXED(index + 1, ma, max_ma, flags); |
1920 | 1863 | ||
1921 | tcpm_log(port, "Requesting PDO %d: %u mV, %u mA%s", | 1864 | tcpm_log(port, "Requesting PDO %d: %u mV, %u mA%s", |
1922 | src_pdo_index, mv, ma, | 1865 | index, mv, ma, |
1923 | flags & RDO_CAP_MISMATCH ? " [mismatch]" : ""); | 1866 | flags & RDO_CAP_MISMATCH ? " [mismatch]" : ""); |
1924 | } | 1867 | } |
1925 | 1868 | ||
@@ -3650,19 +3593,6 @@ int tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo, | |||
3650 | } | 3593 | } |
3651 | EXPORT_SYMBOL_GPL(tcpm_update_sink_capabilities); | 3594 | EXPORT_SYMBOL_GPL(tcpm_update_sink_capabilities); |
3652 | 3595 | ||
3653 | static int nr_type_pdos(const u32 *pdo, unsigned int nr_pdo, | ||
3654 | enum pd_pdo_type type) | ||
3655 | { | ||
3656 | int count = 0; | ||
3657 | int i; | ||
3658 | |||
3659 | for (i = 0; i < nr_pdo; i++) { | ||
3660 | if (pdo_type(pdo[i]) == type) | ||
3661 | count++; | ||
3662 | } | ||
3663 | return count; | ||
3664 | } | ||
3665 | |||
3666 | struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc) | 3596 | struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc) |
3667 | { | 3597 | { |
3668 | struct tcpm_port *port; | 3598 | struct tcpm_port *port; |
@@ -3708,15 +3638,6 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc) | |||
3708 | tcpc->config->nr_src_pdo); | 3638 | tcpc->config->nr_src_pdo); |
3709 | port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, tcpc->config->snk_pdo, | 3639 | port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, tcpc->config->snk_pdo, |
3710 | tcpc->config->nr_snk_pdo); | 3640 | tcpc->config->nr_snk_pdo); |
3711 | port->nr_fixed = nr_type_pdos(port->snk_pdo, | ||
3712 | port->nr_snk_pdo, | ||
3713 | PDO_TYPE_FIXED); | ||
3714 | port->nr_var = nr_type_pdos(port->snk_pdo, | ||
3715 | port->nr_snk_pdo, | ||
3716 | PDO_TYPE_VAR); | ||
3717 | port->nr_batt = nr_type_pdos(port->snk_pdo, | ||
3718 | port->nr_snk_pdo, | ||
3719 | PDO_TYPE_BATT); | ||
3720 | port->nr_snk_vdo = tcpm_copy_vdos(port->snk_vdo, tcpc->config->snk_vdo, | 3641 | port->nr_snk_vdo = tcpm_copy_vdos(port->snk_vdo, tcpc->config->snk_vdo, |
3721 | tcpc->config->nr_snk_vdo); | 3642 | tcpc->config->nr_snk_vdo); |
3722 | 3643 | ||
diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c index d86f72bbbb91..6dcd3ff655c3 100644 --- a/drivers/usb/usbip/vudc_sysfs.c +++ b/drivers/usb/usbip/vudc_sysfs.c | |||
@@ -105,10 +105,14 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a | |||
105 | if (rv != 0) | 105 | if (rv != 0) |
106 | return -EINVAL; | 106 | return -EINVAL; |
107 | 107 | ||
108 | if (!udc) { | ||
109 | dev_err(dev, "no device"); | ||
110 | return -ENODEV; | ||
111 | } | ||
108 | spin_lock_irqsave(&udc->lock, flags); | 112 | spin_lock_irqsave(&udc->lock, flags); |
109 | /* Don't export what we don't have */ | 113 | /* Don't export what we don't have */ |
110 | if (!udc || !udc->driver || !udc->pullup) { | 114 | if (!udc->driver || !udc->pullup) { |
111 | dev_err(dev, "no device or gadget not bound"); | 115 | dev_err(dev, "gadget not bound"); |
112 | ret = -ENODEV; | 116 | ret = -ENODEV; |
113 | goto unlock; | 117 | goto unlock; |
114 | } | 118 | } |
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index e30e29ae4819..45657e2b1ff7 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c | |||
@@ -338,11 +338,12 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, | |||
338 | { | 338 | { |
339 | struct page *page[1]; | 339 | struct page *page[1]; |
340 | struct vm_area_struct *vma; | 340 | struct vm_area_struct *vma; |
341 | struct vm_area_struct *vmas[1]; | ||
341 | int ret; | 342 | int ret; |
342 | 343 | ||
343 | if (mm == current->mm) { | 344 | if (mm == current->mm) { |
344 | ret = get_user_pages_fast(vaddr, 1, !!(prot & IOMMU_WRITE), | 345 | ret = get_user_pages_longterm(vaddr, 1, !!(prot & IOMMU_WRITE), |
345 | page); | 346 | page, vmas); |
346 | } else { | 347 | } else { |
347 | unsigned int flags = 0; | 348 | unsigned int flags = 0; |
348 | 349 | ||
@@ -351,7 +352,18 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, | |||
351 | 352 | ||
352 | down_read(&mm->mmap_sem); | 353 | down_read(&mm->mmap_sem); |
353 | ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page, | 354 | ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page, |
354 | NULL, NULL); | 355 | vmas, NULL); |
356 | /* | ||
357 | * The lifetime of a vaddr_get_pfn() page pin is | ||
358 | * userspace-controlled. In the fs-dax case this could | ||
359 | * lead to indefinite stalls in filesystem operations. | ||
360 | * Disallow attempts to pin fs-dax pages via this | ||
361 | * interface. | ||
362 | */ | ||
363 | if (ret > 0 && vma_is_fsdax(vmas[0])) { | ||
364 | ret = -EOPNOTSUPP; | ||
365 | put_page(page[0]); | ||
366 | } | ||
355 | up_read(&mm->mmap_sem); | 367 | up_read(&mm->mmap_sem); |
356 | } | 368 | } |
357 | 369 | ||
diff --git a/drivers/video/fbdev/sbuslib.c b/drivers/video/fbdev/sbuslib.c index af6fc97f4ba4..a436d44f1b7f 100644 --- a/drivers/video/fbdev/sbuslib.c +++ b/drivers/video/fbdev/sbuslib.c | |||
@@ -122,7 +122,7 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg, | |||
122 | unsigned char __user *ured; | 122 | unsigned char __user *ured; |
123 | unsigned char __user *ugreen; | 123 | unsigned char __user *ugreen; |
124 | unsigned char __user *ublue; | 124 | unsigned char __user *ublue; |
125 | int index, count, i; | 125 | unsigned int index, count, i; |
126 | 126 | ||
127 | if (get_user(index, &c->index) || | 127 | if (get_user(index, &c->index) || |
128 | __get_user(count, &c->count) || | 128 | __get_user(count, &c->count) || |
@@ -161,7 +161,7 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg, | |||
161 | unsigned char __user *ugreen; | 161 | unsigned char __user *ugreen; |
162 | unsigned char __user *ublue; | 162 | unsigned char __user *ublue; |
163 | struct fb_cmap *cmap = &info->cmap; | 163 | struct fb_cmap *cmap = &info->cmap; |
164 | int index, count, i; | 164 | unsigned int index, count, i; |
165 | u8 red, green, blue; | 165 | u8 red, green, blue; |
166 | 166 | ||
167 | if (get_user(index, &c->index) || | 167 | if (get_user(index, &c->index) || |
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index eb30f3e09a47..71458f493cf8 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c | |||
@@ -428,8 +428,6 @@ unmap_release: | |||
428 | i = virtio16_to_cpu(_vq->vdev, vq->vring.desc[i].next); | 428 | i = virtio16_to_cpu(_vq->vdev, vq->vring.desc[i].next); |
429 | } | 429 | } |
430 | 430 | ||
431 | vq->vq.num_free += total_sg; | ||
432 | |||
433 | if (indirect) | 431 | if (indirect) |
434 | kfree(desc); | 432 | kfree(desc); |
435 | 433 | ||
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index aff773bcebdb..37460cd6cabb 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig | |||
@@ -226,6 +226,7 @@ config ZIIRAVE_WATCHDOG | |||
226 | config RAVE_SP_WATCHDOG | 226 | config RAVE_SP_WATCHDOG |
227 | tristate "RAVE SP Watchdog timer" | 227 | tristate "RAVE SP Watchdog timer" |
228 | depends on RAVE_SP_CORE | 228 | depends on RAVE_SP_CORE |
229 | depends on NVMEM || !NVMEM | ||
229 | select WATCHDOG_CORE | 230 | select WATCHDOG_CORE |
230 | help | 231 | help |
231 | Support for the watchdog on RAVE SP device. | 232 | Support for the watchdog on RAVE SP device. |
@@ -903,6 +904,7 @@ config F71808E_WDT | |||
903 | config SP5100_TCO | 904 | config SP5100_TCO |
904 | tristate "AMD/ATI SP5100 TCO Timer/Watchdog" | 905 | tristate "AMD/ATI SP5100 TCO Timer/Watchdog" |
905 | depends on X86 && PCI | 906 | depends on X86 && PCI |
907 | select WATCHDOG_CORE | ||
906 | ---help--- | 908 | ---help--- |
907 | Hardware watchdog driver for the AMD/ATI SP5100 chipset. The TCO | 909 | Hardware watchdog driver for the AMD/ATI SP5100 chipset. The TCO |
908 | (Total Cost of Ownership) timer is a watchdog timer that will reboot | 910 | (Total Cost of Ownership) timer is a watchdog timer that will reboot |
@@ -1008,6 +1010,7 @@ config WAFER_WDT | |||
1008 | config I6300ESB_WDT | 1010 | config I6300ESB_WDT |
1009 | tristate "Intel 6300ESB Timer/Watchdog" | 1011 | tristate "Intel 6300ESB Timer/Watchdog" |
1010 | depends on PCI | 1012 | depends on PCI |
1013 | select WATCHDOG_CORE | ||
1011 | ---help--- | 1014 | ---help--- |
1012 | Hardware driver for the watchdog timer built into the Intel | 1015 | Hardware driver for the watchdog timer built into the Intel |
1013 | 6300ESB controller hub. | 1016 | 6300ESB controller hub. |
@@ -1837,6 +1840,7 @@ config WATCHDOG_SUN4V | |||
1837 | config XEN_WDT | 1840 | config XEN_WDT |
1838 | tristate "Xen Watchdog support" | 1841 | tristate "Xen Watchdog support" |
1839 | depends on XEN | 1842 | depends on XEN |
1843 | select WATCHDOG_CORE | ||
1840 | help | 1844 | help |
1841 | Say Y here to support the hypervisor watchdog capability provided | 1845 | Say Y here to support the hypervisor watchdog capability provided |
1842 | by Xen 4.0 and newer. The watchdog timeout period is normally one | 1846 | by Xen 4.0 and newer. The watchdog timeout period is normally one |
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c index e0678c14480f..3a33c5344bd5 100644 --- a/drivers/watchdog/f71808e_wdt.c +++ b/drivers/watchdog/f71808e_wdt.c | |||
@@ -566,7 +566,8 @@ static ssize_t watchdog_write(struct file *file, const char __user *buf, | |||
566 | char c; | 566 | char c; |
567 | if (get_user(c, buf + i)) | 567 | if (get_user(c, buf + i)) |
568 | return -EFAULT; | 568 | return -EFAULT; |
569 | expect_close = (c == 'V'); | 569 | if (c == 'V') |
570 | expect_close = true; | ||
570 | } | 571 | } |
571 | 572 | ||
572 | /* Properly order writes across fork()ed processes */ | 573 | /* Properly order writes across fork()ed processes */ |
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c index f1f00dfc0e68..b0a158073abd 100644 --- a/drivers/watchdog/hpwdt.c +++ b/drivers/watchdog/hpwdt.c | |||
@@ -28,16 +28,7 @@ | |||
28 | #include <linux/types.h> | 28 | #include <linux/types.h> |
29 | #include <linux/uaccess.h> | 29 | #include <linux/uaccess.h> |
30 | #include <linux/watchdog.h> | 30 | #include <linux/watchdog.h> |
31 | #ifdef CONFIG_HPWDT_NMI_DECODING | ||
32 | #include <linux/dmi.h> | ||
33 | #include <linux/spinlock.h> | ||
34 | #include <linux/nmi.h> | ||
35 | #include <linux/kdebug.h> | ||
36 | #include <linux/notifier.h> | ||
37 | #include <asm/set_memory.h> | ||
38 | #endif /* CONFIG_HPWDT_NMI_DECODING */ | ||
39 | #include <asm/nmi.h> | 31 | #include <asm/nmi.h> |
40 | #include <asm/frame.h> | ||
41 | 32 | ||
42 | #define HPWDT_VERSION "1.4.0" | 33 | #define HPWDT_VERSION "1.4.0" |
43 | #define SECS_TO_TICKS(secs) ((secs) * 1000 / 128) | 34 | #define SECS_TO_TICKS(secs) ((secs) * 1000 / 128) |
@@ -48,6 +39,9 @@ | |||
48 | static unsigned int soft_margin = DEFAULT_MARGIN; /* in seconds */ | 39 | static unsigned int soft_margin = DEFAULT_MARGIN; /* in seconds */ |
49 | static unsigned int reload; /* the computed soft_margin */ | 40 | static unsigned int reload; /* the computed soft_margin */ |
50 | static bool nowayout = WATCHDOG_NOWAYOUT; | 41 | static bool nowayout = WATCHDOG_NOWAYOUT; |
42 | #ifdef CONFIG_HPWDT_NMI_DECODING | ||
43 | static unsigned int allow_kdump = 1; | ||
44 | #endif | ||
51 | static char expect_release; | 45 | static char expect_release; |
52 | static unsigned long hpwdt_is_open; | 46 | static unsigned long hpwdt_is_open; |
53 | 47 | ||
@@ -63,373 +57,6 @@ static const struct pci_device_id hpwdt_devices[] = { | |||
63 | }; | 57 | }; |
64 | MODULE_DEVICE_TABLE(pci, hpwdt_devices); | 58 | MODULE_DEVICE_TABLE(pci, hpwdt_devices); |
65 | 59 | ||
66 | #ifdef CONFIG_HPWDT_NMI_DECODING | ||
67 | #define PCI_BIOS32_SD_VALUE 0x5F32335F /* "_32_" */ | ||
68 | #define CRU_BIOS_SIGNATURE_VALUE 0x55524324 | ||
69 | #define PCI_BIOS32_PARAGRAPH_LEN 16 | ||
70 | #define PCI_ROM_BASE1 0x000F0000 | ||
71 | #define ROM_SIZE 0x10000 | ||
72 | |||
73 | struct bios32_service_dir { | ||
74 | u32 signature; | ||
75 | u32 entry_point; | ||
76 | u8 revision; | ||
77 | u8 length; | ||
78 | u8 checksum; | ||
79 | u8 reserved[5]; | ||
80 | }; | ||
81 | |||
82 | /* type 212 */ | ||
83 | struct smbios_cru64_info { | ||
84 | u8 type; | ||
85 | u8 byte_length; | ||
86 | u16 handle; | ||
87 | u32 signature; | ||
88 | u64 physical_address; | ||
89 | u32 double_length; | ||
90 | u32 double_offset; | ||
91 | }; | ||
92 | #define SMBIOS_CRU64_INFORMATION 212 | ||
93 | |||
94 | /* type 219 */ | ||
95 | struct smbios_proliant_info { | ||
96 | u8 type; | ||
97 | u8 byte_length; | ||
98 | u16 handle; | ||
99 | u32 power_features; | ||
100 | u32 omega_features; | ||
101 | u32 reserved; | ||
102 | u32 misc_features; | ||
103 | }; | ||
104 | #define SMBIOS_ICRU_INFORMATION 219 | ||
105 | |||
106 | |||
107 | struct cmn_registers { | ||
108 | union { | ||
109 | struct { | ||
110 | u8 ral; | ||
111 | u8 rah; | ||
112 | u16 rea2; | ||
113 | }; | ||
114 | u32 reax; | ||
115 | } u1; | ||
116 | union { | ||
117 | struct { | ||
118 | u8 rbl; | ||
119 | u8 rbh; | ||
120 | u8 reb2l; | ||
121 | u8 reb2h; | ||
122 | }; | ||
123 | u32 rebx; | ||
124 | } u2; | ||
125 | union { | ||
126 | struct { | ||
127 | u8 rcl; | ||
128 | u8 rch; | ||
129 | u16 rec2; | ||
130 | }; | ||
131 | u32 recx; | ||
132 | } u3; | ||
133 | union { | ||
134 | struct { | ||
135 | u8 rdl; | ||
136 | u8 rdh; | ||
137 | u16 red2; | ||
138 | }; | ||
139 | u32 redx; | ||
140 | } u4; | ||
141 | |||
142 | u32 resi; | ||
143 | u32 redi; | ||
144 | u16 rds; | ||
145 | u16 res; | ||
146 | u32 reflags; | ||
147 | } __attribute__((packed)); | ||
148 | |||
149 | static unsigned int hpwdt_nmi_decoding; | ||
150 | static unsigned int allow_kdump = 1; | ||
151 | static unsigned int is_icru; | ||
152 | static unsigned int is_uefi; | ||
153 | static DEFINE_SPINLOCK(rom_lock); | ||
154 | static void *cru_rom_addr; | ||
155 | static struct cmn_registers cmn_regs; | ||
156 | |||
157 | extern asmlinkage void asminline_call(struct cmn_registers *pi86Regs, | ||
158 | unsigned long *pRomEntry); | ||
159 | |||
160 | #ifdef CONFIG_X86_32 | ||
161 | /* --32 Bit Bios------------------------------------------------------------ */ | ||
162 | |||
163 | #define HPWDT_ARCH 32 | ||
164 | |||
165 | asm(".text \n\t" | ||
166 | ".align 4 \n\t" | ||
167 | ".globl asminline_call \n" | ||
168 | "asminline_call: \n\t" | ||
169 | "pushl %ebp \n\t" | ||
170 | "movl %esp, %ebp \n\t" | ||
171 | "pusha \n\t" | ||
172 | "pushf \n\t" | ||
173 | "push %es \n\t" | ||
174 | "push %ds \n\t" | ||
175 | "pop %es \n\t" | ||
176 | "movl 8(%ebp),%eax \n\t" | ||
177 | "movl 4(%eax),%ebx \n\t" | ||
178 | "movl 8(%eax),%ecx \n\t" | ||
179 | "movl 12(%eax),%edx \n\t" | ||
180 | "movl 16(%eax),%esi \n\t" | ||
181 | "movl 20(%eax),%edi \n\t" | ||
182 | "movl (%eax),%eax \n\t" | ||
183 | "push %cs \n\t" | ||
184 | "call *12(%ebp) \n\t" | ||
185 | "pushf \n\t" | ||
186 | "pushl %eax \n\t" | ||
187 | "movl 8(%ebp),%eax \n\t" | ||
188 | "movl %ebx,4(%eax) \n\t" | ||
189 | "movl %ecx,8(%eax) \n\t" | ||
190 | "movl %edx,12(%eax) \n\t" | ||
191 | "movl %esi,16(%eax) \n\t" | ||
192 | "movl %edi,20(%eax) \n\t" | ||
193 | "movw %ds,24(%eax) \n\t" | ||
194 | "movw %es,26(%eax) \n\t" | ||
195 | "popl %ebx \n\t" | ||
196 | "movl %ebx,(%eax) \n\t" | ||
197 | "popl %ebx \n\t" | ||
198 | "movl %ebx,28(%eax) \n\t" | ||
199 | "pop %es \n\t" | ||
200 | "popf \n\t" | ||
201 | "popa \n\t" | ||
202 | "leave \n\t" | ||
203 | "ret \n\t" | ||
204 | ".previous"); | ||
205 | |||
206 | |||
207 | /* | ||
208 | * cru_detect | ||
209 | * | ||
210 | * Routine Description: | ||
211 | * This function uses the 32-bit BIOS Service Directory record to | ||
212 | * search for a $CRU record. | ||
213 | * | ||
214 | * Return Value: | ||
215 | * 0 : SUCCESS | ||
216 | * <0 : FAILURE | ||
217 | */ | ||
218 | static int cru_detect(unsigned long map_entry, | ||
219 | unsigned long map_offset) | ||
220 | { | ||
221 | void *bios32_map; | ||
222 | unsigned long *bios32_entrypoint; | ||
223 | unsigned long cru_physical_address; | ||
224 | unsigned long cru_length; | ||
225 | unsigned long physical_bios_base = 0; | ||
226 | unsigned long physical_bios_offset = 0; | ||
227 | int retval = -ENODEV; | ||
228 | |||
229 | bios32_map = ioremap(map_entry, (2 * PAGE_SIZE)); | ||
230 | |||
231 | if (bios32_map == NULL) | ||
232 | return -ENODEV; | ||
233 | |||
234 | bios32_entrypoint = bios32_map + map_offset; | ||
235 | |||
236 | cmn_regs.u1.reax = CRU_BIOS_SIGNATURE_VALUE; | ||
237 | |||
238 | set_memory_x((unsigned long)bios32_map, 2); | ||
239 | asminline_call(&cmn_regs, bios32_entrypoint); | ||
240 | |||
241 | if (cmn_regs.u1.ral != 0) { | ||
242 | pr_warn("Call succeeded but with an error: 0x%x\n", | ||
243 | cmn_regs.u1.ral); | ||
244 | } else { | ||
245 | physical_bios_base = cmn_regs.u2.rebx; | ||
246 | physical_bios_offset = cmn_regs.u4.redx; | ||
247 | cru_length = cmn_regs.u3.recx; | ||
248 | cru_physical_address = | ||
249 | physical_bios_base + physical_bios_offset; | ||
250 | |||
251 | /* If the values look OK, then map it in. */ | ||
252 | if ((physical_bios_base + physical_bios_offset)) { | ||
253 | cru_rom_addr = | ||
254 | ioremap(cru_physical_address, cru_length); | ||
255 | if (cru_rom_addr) { | ||
256 | set_memory_x((unsigned long)cru_rom_addr & PAGE_MASK, | ||
257 | (cru_length + PAGE_SIZE - 1) >> PAGE_SHIFT); | ||
258 | retval = 0; | ||
259 | } | ||
260 | } | ||
261 | |||
262 | pr_debug("CRU Base Address: 0x%lx\n", physical_bios_base); | ||
263 | pr_debug("CRU Offset Address: 0x%lx\n", physical_bios_offset); | ||
264 | pr_debug("CRU Length: 0x%lx\n", cru_length); | ||
265 | pr_debug("CRU Mapped Address: %p\n", &cru_rom_addr); | ||
266 | } | ||
267 | iounmap(bios32_map); | ||
268 | return retval; | ||
269 | } | ||
270 | |||
271 | /* | ||
272 | * bios_checksum | ||
273 | */ | ||
274 | static int bios_checksum(const char __iomem *ptr, int len) | ||
275 | { | ||
276 | char sum = 0; | ||
277 | int i; | ||
278 | |||
279 | /* | ||
280 | * calculate checksum of size bytes. This should add up | ||
281 | * to zero if we have a valid header. | ||
282 | */ | ||
283 | for (i = 0; i < len; i++) | ||
284 | sum += ptr[i]; | ||
285 | |||
286 | return ((sum == 0) && (len > 0)); | ||
287 | } | ||
288 | |||
289 | /* | ||
290 | * bios32_present | ||
291 | * | ||
292 | * Routine Description: | ||
293 | * This function finds the 32-bit BIOS Service Directory | ||
294 | * | ||
295 | * Return Value: | ||
296 | * 0 : SUCCESS | ||
297 | * <0 : FAILURE | ||
298 | */ | ||
299 | static int bios32_present(const char __iomem *p) | ||
300 | { | ||
301 | struct bios32_service_dir *bios_32_ptr; | ||
302 | int length; | ||
303 | unsigned long map_entry, map_offset; | ||
304 | |||
305 | bios_32_ptr = (struct bios32_service_dir *) p; | ||
306 | |||
307 | /* | ||
308 | * Search for signature by checking equal to the swizzled value | ||
309 | * instead of calling another routine to perform a strcmp. | ||
310 | */ | ||
311 | if (bios_32_ptr->signature == PCI_BIOS32_SD_VALUE) { | ||
312 | length = bios_32_ptr->length * PCI_BIOS32_PARAGRAPH_LEN; | ||
313 | if (bios_checksum(p, length)) { | ||
314 | /* | ||
315 | * According to the spec, we're looking for the | ||
316 | * first 4KB-aligned address below the entrypoint | ||
317 | * listed in the header. The Service Directory code | ||
318 | * is guaranteed to occupy no more than 2 4KB pages. | ||
319 | */ | ||
320 | map_entry = bios_32_ptr->entry_point & ~(PAGE_SIZE - 1); | ||
321 | map_offset = bios_32_ptr->entry_point - map_entry; | ||
322 | |||
323 | return cru_detect(map_entry, map_offset); | ||
324 | } | ||
325 | } | ||
326 | return -ENODEV; | ||
327 | } | ||
328 | |||
329 | static int detect_cru_service(void) | ||
330 | { | ||
331 | char __iomem *p, *q; | ||
332 | int rc = -1; | ||
333 | |||
334 | /* | ||
335 | * Search from 0x0f0000 through 0x0fffff, inclusive. | ||
336 | */ | ||
337 | p = ioremap(PCI_ROM_BASE1, ROM_SIZE); | ||
338 | if (p == NULL) | ||
339 | return -ENOMEM; | ||
340 | |||
341 | for (q = p; q < p + ROM_SIZE; q += 16) { | ||
342 | rc = bios32_present(q); | ||
343 | if (!rc) | ||
344 | break; | ||
345 | } | ||
346 | iounmap(p); | ||
347 | return rc; | ||
348 | } | ||
349 | /* ------------------------------------------------------------------------- */ | ||
350 | #endif /* CONFIG_X86_32 */ | ||
351 | #ifdef CONFIG_X86_64 | ||
352 | /* --64 Bit Bios------------------------------------------------------------ */ | ||
353 | |||
354 | #define HPWDT_ARCH 64 | ||
355 | |||
356 | asm(".text \n\t" | ||
357 | ".align 4 \n\t" | ||
358 | ".globl asminline_call \n\t" | ||
359 | ".type asminline_call, @function \n\t" | ||
360 | "asminline_call: \n\t" | ||
361 | FRAME_BEGIN | ||
362 | "pushq %rax \n\t" | ||
363 | "pushq %rbx \n\t" | ||
364 | "pushq %rdx \n\t" | ||
365 | "pushq %r12 \n\t" | ||
366 | "pushq %r9 \n\t" | ||
367 | "movq %rsi, %r12 \n\t" | ||
368 | "movq %rdi, %r9 \n\t" | ||
369 | "movl 4(%r9),%ebx \n\t" | ||
370 | "movl 8(%r9),%ecx \n\t" | ||
371 | "movl 12(%r9),%edx \n\t" | ||
372 | "movl 16(%r9),%esi \n\t" | ||
373 | "movl 20(%r9),%edi \n\t" | ||
374 | "movl (%r9),%eax \n\t" | ||
375 | "call *%r12 \n\t" | ||
376 | "pushfq \n\t" | ||
377 | "popq %r12 \n\t" | ||
378 | "movl %eax, (%r9) \n\t" | ||
379 | "movl %ebx, 4(%r9) \n\t" | ||
380 | "movl %ecx, 8(%r9) \n\t" | ||
381 | "movl %edx, 12(%r9) \n\t" | ||
382 | "movl %esi, 16(%r9) \n\t" | ||
383 | "movl %edi, 20(%r9) \n\t" | ||
384 | "movq %r12, %rax \n\t" | ||
385 | "movl %eax, 28(%r9) \n\t" | ||
386 | "popq %r9 \n\t" | ||
387 | "popq %r12 \n\t" | ||
388 | "popq %rdx \n\t" | ||
389 | "popq %rbx \n\t" | ||
390 | "popq %rax \n\t" | ||
391 | FRAME_END | ||
392 | "ret \n\t" | ||
393 | ".previous"); | ||
394 | |||
395 | /* | ||
396 | * dmi_find_cru | ||
397 | * | ||
398 | * Routine Description: | ||
399 | * This function checks whether or not a SMBIOS/DMI record is | ||
400 | * the 64bit CRU info or not | ||
401 | */ | ||
402 | static void dmi_find_cru(const struct dmi_header *dm, void *dummy) | ||
403 | { | ||
404 | struct smbios_cru64_info *smbios_cru64_ptr; | ||
405 | unsigned long cru_physical_address; | ||
406 | |||
407 | if (dm->type == SMBIOS_CRU64_INFORMATION) { | ||
408 | smbios_cru64_ptr = (struct smbios_cru64_info *) dm; | ||
409 | if (smbios_cru64_ptr->signature == CRU_BIOS_SIGNATURE_VALUE) { | ||
410 | cru_physical_address = | ||
411 | smbios_cru64_ptr->physical_address + | ||
412 | smbios_cru64_ptr->double_offset; | ||
413 | cru_rom_addr = ioremap(cru_physical_address, | ||
414 | smbios_cru64_ptr->double_length); | ||
415 | set_memory_x((unsigned long)cru_rom_addr & PAGE_MASK, | ||
416 | smbios_cru64_ptr->double_length >> PAGE_SHIFT); | ||
417 | } | ||
418 | } | ||
419 | } | ||
420 | |||
421 | static int detect_cru_service(void) | ||
422 | { | ||
423 | cru_rom_addr = NULL; | ||
424 | |||
425 | dmi_walk(dmi_find_cru, NULL); | ||
426 | |||
427 | /* if cru_rom_addr has been set then we found a CRU service */ | ||
428 | return ((cru_rom_addr != NULL) ? 0 : -ENODEV); | ||
429 | } | ||
430 | /* ------------------------------------------------------------------------- */ | ||
431 | #endif /* CONFIG_X86_64 */ | ||
432 | #endif /* CONFIG_HPWDT_NMI_DECODING */ | ||
433 | 60 | ||
434 | /* | 61 | /* |
435 | * Watchdog operations | 62 | * Watchdog operations |
@@ -486,30 +113,12 @@ static int hpwdt_my_nmi(void) | |||
486 | */ | 113 | */ |
487 | static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs) | 114 | static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs) |
488 | { | 115 | { |
489 | unsigned long rom_pl; | ||
490 | static int die_nmi_called; | ||
491 | |||
492 | if (!hpwdt_nmi_decoding) | ||
493 | return NMI_DONE; | ||
494 | |||
495 | if ((ulReason == NMI_UNKNOWN) && !hpwdt_my_nmi()) | 116 | if ((ulReason == NMI_UNKNOWN) && !hpwdt_my_nmi()) |
496 | return NMI_DONE; | 117 | return NMI_DONE; |
497 | 118 | ||
498 | spin_lock_irqsave(&rom_lock, rom_pl); | ||
499 | if (!die_nmi_called && !is_icru && !is_uefi) | ||
500 | asminline_call(&cmn_regs, cru_rom_addr); | ||
501 | die_nmi_called = 1; | ||
502 | spin_unlock_irqrestore(&rom_lock, rom_pl); | ||
503 | |||
504 | if (allow_kdump) | 119 | if (allow_kdump) |
505 | hpwdt_stop(); | 120 | hpwdt_stop(); |
506 | 121 | ||
507 | if (!is_icru && !is_uefi) { | ||
508 | if (cmn_regs.u1.ral == 0) { | ||
509 | nmi_panic(regs, "An NMI occurred, but unable to determine source.\n"); | ||
510 | return NMI_HANDLED; | ||
511 | } | ||
512 | } | ||
513 | nmi_panic(regs, "An NMI occurred. Depending on your system the reason " | 122 | nmi_panic(regs, "An NMI occurred. Depending on your system the reason " |
514 | "for the NMI is logged in any one of the following " | 123 | "for the NMI is logged in any one of the following " |
515 | "resources:\n" | 124 | "resources:\n" |
@@ -675,84 +284,11 @@ static struct miscdevice hpwdt_miscdev = { | |||
675 | * Init & Exit | 284 | * Init & Exit |
676 | */ | 285 | */ |
677 | 286 | ||
678 | #ifdef CONFIG_HPWDT_NMI_DECODING | ||
679 | #ifdef CONFIG_X86_LOCAL_APIC | ||
680 | static void hpwdt_check_nmi_decoding(struct pci_dev *dev) | ||
681 | { | ||
682 | /* | ||
683 | * If nmi_watchdog is turned off then we can turn on | ||
684 | * our nmi decoding capability. | ||
685 | */ | ||
686 | hpwdt_nmi_decoding = 1; | ||
687 | } | ||
688 | #else | ||
689 | static void hpwdt_check_nmi_decoding(struct pci_dev *dev) | ||
690 | { | ||
691 | dev_warn(&dev->dev, "NMI decoding is disabled. " | ||
692 | "Your kernel does not support a NMI Watchdog.\n"); | ||
693 | } | ||
694 | #endif /* CONFIG_X86_LOCAL_APIC */ | ||
695 | |||
696 | /* | ||
697 | * dmi_find_icru | ||
698 | * | ||
699 | * Routine Description: | ||
700 | * This function checks whether or not we are on an iCRU-based server. | ||
701 | * This check is independent of architecture and needs to be made for | ||
702 | * any ProLiant system. | ||
703 | */ | ||
704 | static void dmi_find_icru(const struct dmi_header *dm, void *dummy) | ||
705 | { | ||
706 | struct smbios_proliant_info *smbios_proliant_ptr; | ||
707 | |||
708 | if (dm->type == SMBIOS_ICRU_INFORMATION) { | ||
709 | smbios_proliant_ptr = (struct smbios_proliant_info *) dm; | ||
710 | if (smbios_proliant_ptr->misc_features & 0x01) | ||
711 | is_icru = 1; | ||
712 | if (smbios_proliant_ptr->misc_features & 0x1400) | ||
713 | is_uefi = 1; | ||
714 | } | ||
715 | } | ||
716 | 287 | ||
717 | static int hpwdt_init_nmi_decoding(struct pci_dev *dev) | 288 | static int hpwdt_init_nmi_decoding(struct pci_dev *dev) |
718 | { | 289 | { |
290 | #ifdef CONFIG_HPWDT_NMI_DECODING | ||
719 | int retval; | 291 | int retval; |
720 | |||
721 | /* | ||
722 | * On typical CRU-based systems we need to map that service in | ||
723 | * the BIOS. For 32 bit Operating Systems we need to go through | ||
724 | * the 32 Bit BIOS Service Directory. For 64 bit Operating | ||
725 | * Systems we get that service through SMBIOS. | ||
726 | * | ||
727 | * On systems that support the new iCRU service all we need to | ||
728 | * do is call dmi_walk to get the supported flag value and skip | ||
729 | * the old cru detect code. | ||
730 | */ | ||
731 | dmi_walk(dmi_find_icru, NULL); | ||
732 | if (!is_icru && !is_uefi) { | ||
733 | |||
734 | /* | ||
735 | * We need to map the ROM to get the CRU service. | ||
736 | * For 32 bit Operating Systems we need to go through the 32 Bit | ||
737 | * BIOS Service Directory | ||
738 | * For 64 bit Operating Systems we get that service through SMBIOS. | ||
739 | */ | ||
740 | retval = detect_cru_service(); | ||
741 | if (retval < 0) { | ||
742 | dev_warn(&dev->dev, | ||
743 | "Unable to detect the %d Bit CRU Service.\n", | ||
744 | HPWDT_ARCH); | ||
745 | return retval; | ||
746 | } | ||
747 | |||
748 | /* | ||
749 | * We know this is the only CRU call we need to make so lets keep as | ||
750 | * few instructions as possible once the NMI comes in. | ||
751 | */ | ||
752 | cmn_regs.u1.rah = 0x0D; | ||
753 | cmn_regs.u1.ral = 0x02; | ||
754 | } | ||
755 | |||
756 | /* | 292 | /* |
757 | * Only one function can register for NMI_UNKNOWN | 293 | * Only one function can register for NMI_UNKNOWN |
758 | */ | 294 | */ |
@@ -780,45 +316,26 @@ error: | |||
780 | dev_warn(&dev->dev, | 316 | dev_warn(&dev->dev, |
781 | "Unable to register a die notifier (err=%d).\n", | 317 | "Unable to register a die notifier (err=%d).\n", |
782 | retval); | 318 | retval); |
783 | if (cru_rom_addr) | ||
784 | iounmap(cru_rom_addr); | ||
785 | return retval; | 319 | return retval; |
320 | #endif /* CONFIG_HPWDT_NMI_DECODING */ | ||
321 | return 0; | ||
786 | } | 322 | } |
787 | 323 | ||
788 | static void hpwdt_exit_nmi_decoding(void) | 324 | static void hpwdt_exit_nmi_decoding(void) |
789 | { | 325 | { |
326 | #ifdef CONFIG_HPWDT_NMI_DECODING | ||
790 | unregister_nmi_handler(NMI_UNKNOWN, "hpwdt"); | 327 | unregister_nmi_handler(NMI_UNKNOWN, "hpwdt"); |
791 | unregister_nmi_handler(NMI_SERR, "hpwdt"); | 328 | unregister_nmi_handler(NMI_SERR, "hpwdt"); |
792 | unregister_nmi_handler(NMI_IO_CHECK, "hpwdt"); | 329 | unregister_nmi_handler(NMI_IO_CHECK, "hpwdt"); |
793 | if (cru_rom_addr) | 330 | #endif |
794 | iounmap(cru_rom_addr); | ||
795 | } | ||
796 | #else /* !CONFIG_HPWDT_NMI_DECODING */ | ||
797 | static void hpwdt_check_nmi_decoding(struct pci_dev *dev) | ||
798 | { | ||
799 | } | ||
800 | |||
801 | static int hpwdt_init_nmi_decoding(struct pci_dev *dev) | ||
802 | { | ||
803 | return 0; | ||
804 | } | 331 | } |
805 | 332 | ||
806 | static void hpwdt_exit_nmi_decoding(void) | ||
807 | { | ||
808 | } | ||
809 | #endif /* CONFIG_HPWDT_NMI_DECODING */ | ||
810 | |||
811 | static int hpwdt_init_one(struct pci_dev *dev, | 333 | static int hpwdt_init_one(struct pci_dev *dev, |
812 | const struct pci_device_id *ent) | 334 | const struct pci_device_id *ent) |
813 | { | 335 | { |
814 | int retval; | 336 | int retval; |
815 | 337 | ||
816 | /* | 338 | /* |
817 | * Check if we can do NMI decoding or not | ||
818 | */ | ||
819 | hpwdt_check_nmi_decoding(dev); | ||
820 | |||
821 | /* | ||
822 | * First let's find out if we are on an iLO2+ server. We will | 339 | * First let's find out if we are on an iLO2+ server. We will |
823 | * not run on a legacy ASM box. | 340 | * not run on a legacy ASM box. |
824 | * So we only support the G5 ProLiant servers and higher. | 341 | * So we only support the G5 ProLiant servers and higher. |
@@ -922,6 +439,6 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" | |||
922 | #ifdef CONFIG_HPWDT_NMI_DECODING | 439 | #ifdef CONFIG_HPWDT_NMI_DECODING |
923 | module_param(allow_kdump, int, 0); | 440 | module_param(allow_kdump, int, 0); |
924 | MODULE_PARM_DESC(allow_kdump, "Start a kernel dump after NMI occurs"); | 441 | MODULE_PARM_DESC(allow_kdump, "Start a kernel dump after NMI occurs"); |
925 | #endif /* !CONFIG_HPWDT_NMI_DECODING */ | 442 | #endif /* CONFIG_HPWDT_NMI_DECODING */ |
926 | 443 | ||
927 | module_pci_driver(hpwdt_driver); | 444 | module_pci_driver(hpwdt_driver); |
diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c index 316c2eb122d2..e8bd9887c566 100644 --- a/drivers/watchdog/sbsa_gwdt.c +++ b/drivers/watchdog/sbsa_gwdt.c | |||
@@ -50,6 +50,7 @@ | |||
50 | */ | 50 | */ |
51 | 51 | ||
52 | #include <linux/io.h> | 52 | #include <linux/io.h> |
53 | #include <linux/io-64-nonatomic-lo-hi.h> | ||
53 | #include <linux/interrupt.h> | 54 | #include <linux/interrupt.h> |
54 | #include <linux/module.h> | 55 | #include <linux/module.h> |
55 | #include <linux/moduleparam.h> | 56 | #include <linux/moduleparam.h> |
@@ -159,7 +160,7 @@ static unsigned int sbsa_gwdt_get_timeleft(struct watchdog_device *wdd) | |||
159 | !(readl(gwdt->control_base + SBSA_GWDT_WCS) & SBSA_GWDT_WCS_WS0)) | 160 | !(readl(gwdt->control_base + SBSA_GWDT_WCS) & SBSA_GWDT_WCS_WS0)) |
160 | timeleft += readl(gwdt->control_base + SBSA_GWDT_WOR); | 161 | timeleft += readl(gwdt->control_base + SBSA_GWDT_WOR); |
161 | 162 | ||
162 | timeleft += readq(gwdt->control_base + SBSA_GWDT_WCV) - | 163 | timeleft += lo_hi_readq(gwdt->control_base + SBSA_GWDT_WCV) - |
163 | arch_counter_get_cntvct(); | 164 | arch_counter_get_cntvct(); |
164 | 165 | ||
165 | do_div(timeleft, gwdt->clk); | 166 | do_div(timeleft, gwdt->clk); |
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 1ab4bd11f5f3..762378f1811c 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c | |||
@@ -755,8 +755,8 @@ out: | |||
755 | mutex_unlock(&irq_mapping_update_lock); | 755 | mutex_unlock(&irq_mapping_update_lock); |
756 | return irq; | 756 | return irq; |
757 | error_irq: | 757 | error_irq: |
758 | for (; i >= 0; i--) | 758 | while (nvec--) |
759 | __unbind_from_irq(irq + i); | 759 | __unbind_from_irq(irq + nvec); |
760 | mutex_unlock(&irq_mapping_update_lock); | 760 | mutex_unlock(&irq_mapping_update_lock); |
761 | return ret; | 761 | return ret; |
762 | } | 762 | } |
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c index 156e5aea36db..b1092fbefa63 100644 --- a/drivers/xen/pvcalls-back.c +++ b/drivers/xen/pvcalls-back.c | |||
@@ -416,7 +416,7 @@ static int pvcalls_back_connect(struct xenbus_device *dev, | |||
416 | sock); | 416 | sock); |
417 | if (!map) { | 417 | if (!map) { |
418 | ret = -EFAULT; | 418 | ret = -EFAULT; |
419 | sock_release(map->sock); | 419 | sock_release(sock); |
420 | } | 420 | } |
421 | 421 | ||
422 | out: | 422 | out: |
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c index aedbee3b2838..2f11ca72a281 100644 --- a/drivers/xen/pvcalls-front.c +++ b/drivers/xen/pvcalls-front.c | |||
@@ -73,20 +73,25 @@ struct sock_mapping { | |||
73 | wait_queue_head_t inflight_conn_req; | 73 | wait_queue_head_t inflight_conn_req; |
74 | } active; | 74 | } active; |
75 | struct { | 75 | struct { |
76 | /* Socket status */ | 76 | /* |
77 | * Socket status, needs to be 64-bit aligned due to the | ||
78 | * test_and_* functions which have this requirement on arm64. | ||
79 | */ | ||
77 | #define PVCALLS_STATUS_UNINITALIZED 0 | 80 | #define PVCALLS_STATUS_UNINITALIZED 0 |
78 | #define PVCALLS_STATUS_BIND 1 | 81 | #define PVCALLS_STATUS_BIND 1 |
79 | #define PVCALLS_STATUS_LISTEN 2 | 82 | #define PVCALLS_STATUS_LISTEN 2 |
80 | uint8_t status; | 83 | uint8_t status __attribute__((aligned(8))); |
81 | /* | 84 | /* |
82 | * Internal state-machine flags. | 85 | * Internal state-machine flags. |
83 | * Only one accept operation can be inflight for a socket. | 86 | * Only one accept operation can be inflight for a socket. |
84 | * Only one poll operation can be inflight for a given socket. | 87 | * Only one poll operation can be inflight for a given socket. |
88 | * flags needs to be 64-bit aligned due to the test_and_* | ||
89 | * functions which have this requirement on arm64. | ||
85 | */ | 90 | */ |
86 | #define PVCALLS_FLAG_ACCEPT_INFLIGHT 0 | 91 | #define PVCALLS_FLAG_ACCEPT_INFLIGHT 0 |
87 | #define PVCALLS_FLAG_POLL_INFLIGHT 1 | 92 | #define PVCALLS_FLAG_POLL_INFLIGHT 1 |
88 | #define PVCALLS_FLAG_POLL_RET 2 | 93 | #define PVCALLS_FLAG_POLL_RET 2 |
89 | uint8_t flags; | 94 | uint8_t flags __attribute__((aligned(8))); |
90 | uint32_t inflight_req_id; | 95 | uint32_t inflight_req_id; |
91 | struct sock_mapping *accept_map; | 96 | struct sock_mapping *accept_map; |
92 | wait_queue_head_t inflight_accept_req; | 97 | wait_queue_head_t inflight_accept_req; |
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index 74888cacd0b0..ec9eb4fba59c 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c | |||
@@ -466,8 +466,11 @@ int xenbus_probe_node(struct xen_bus_type *bus, | |||
466 | 466 | ||
467 | /* Register with generic device framework. */ | 467 | /* Register with generic device framework. */ |
468 | err = device_register(&xendev->dev); | 468 | err = device_register(&xendev->dev); |
469 | if (err) | 469 | if (err) { |
470 | put_device(&xendev->dev); | ||
471 | xendev = NULL; | ||
470 | goto fail; | 472 | goto fail; |
473 | } | ||
471 | 474 | ||
472 | return 0; | 475 | return 0; |
473 | fail: | 476 | fail: |
@@ -68,9 +68,9 @@ struct aio_ring { | |||
68 | #define AIO_RING_PAGES 8 | 68 | #define AIO_RING_PAGES 8 |
69 | 69 | ||
70 | struct kioctx_table { | 70 | struct kioctx_table { |
71 | struct rcu_head rcu; | 71 | struct rcu_head rcu; |
72 | unsigned nr; | 72 | unsigned nr; |
73 | struct kioctx *table[]; | 73 | struct kioctx __rcu *table[]; |
74 | }; | 74 | }; |
75 | 75 | ||
76 | struct kioctx_cpu { | 76 | struct kioctx_cpu { |
@@ -115,7 +115,8 @@ struct kioctx { | |||
115 | struct page **ring_pages; | 115 | struct page **ring_pages; |
116 | long nr_pages; | 116 | long nr_pages; |
117 | 117 | ||
118 | struct work_struct free_work; | 118 | struct rcu_head free_rcu; |
119 | struct work_struct free_work; /* see free_ioctx() */ | ||
119 | 120 | ||
120 | /* | 121 | /* |
121 | * signals when all in-flight requests are done | 122 | * signals when all in-flight requests are done |
@@ -329,7 +330,7 @@ static int aio_ring_mremap(struct vm_area_struct *vma) | |||
329 | for (i = 0; i < table->nr; i++) { | 330 | for (i = 0; i < table->nr; i++) { |
330 | struct kioctx *ctx; | 331 | struct kioctx *ctx; |
331 | 332 | ||
332 | ctx = table->table[i]; | 333 | ctx = rcu_dereference(table->table[i]); |
333 | if (ctx && ctx->aio_ring_file == file) { | 334 | if (ctx && ctx->aio_ring_file == file) { |
334 | if (!atomic_read(&ctx->dead)) { | 335 | if (!atomic_read(&ctx->dead)) { |
335 | ctx->user_id = ctx->mmap_base = vma->vm_start; | 336 | ctx->user_id = ctx->mmap_base = vma->vm_start; |
@@ -588,6 +589,12 @@ static int kiocb_cancel(struct aio_kiocb *kiocb) | |||
588 | return cancel(&kiocb->common); | 589 | return cancel(&kiocb->common); |
589 | } | 590 | } |
590 | 591 | ||
592 | /* | ||
593 | * free_ioctx() should be RCU delayed to synchronize against the RCU | ||
594 | * protected lookup_ioctx() and also needs process context to call | ||
595 | * aio_free_ring(), so the double bouncing through kioctx->free_rcu and | ||
596 | * ->free_work. | ||
597 | */ | ||
591 | static void free_ioctx(struct work_struct *work) | 598 | static void free_ioctx(struct work_struct *work) |
592 | { | 599 | { |
593 | struct kioctx *ctx = container_of(work, struct kioctx, free_work); | 600 | struct kioctx *ctx = container_of(work, struct kioctx, free_work); |
@@ -601,6 +608,14 @@ static void free_ioctx(struct work_struct *work) | |||
601 | kmem_cache_free(kioctx_cachep, ctx); | 608 | kmem_cache_free(kioctx_cachep, ctx); |
602 | } | 609 | } |
603 | 610 | ||
611 | static void free_ioctx_rcufn(struct rcu_head *head) | ||
612 | { | ||
613 | struct kioctx *ctx = container_of(head, struct kioctx, free_rcu); | ||
614 | |||
615 | INIT_WORK(&ctx->free_work, free_ioctx); | ||
616 | schedule_work(&ctx->free_work); | ||
617 | } | ||
618 | |||
604 | static void free_ioctx_reqs(struct percpu_ref *ref) | 619 | static void free_ioctx_reqs(struct percpu_ref *ref) |
605 | { | 620 | { |
606 | struct kioctx *ctx = container_of(ref, struct kioctx, reqs); | 621 | struct kioctx *ctx = container_of(ref, struct kioctx, reqs); |
@@ -609,8 +624,8 @@ static void free_ioctx_reqs(struct percpu_ref *ref) | |||
609 | if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count)) | 624 | if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count)) |
610 | complete(&ctx->rq_wait->comp); | 625 | complete(&ctx->rq_wait->comp); |
611 | 626 | ||
612 | INIT_WORK(&ctx->free_work, free_ioctx); | 627 | /* Synchronize against RCU protected table->table[] dereferences */ |
613 | schedule_work(&ctx->free_work); | 628 | call_rcu(&ctx->free_rcu, free_ioctx_rcufn); |
614 | } | 629 | } |
615 | 630 | ||
616 | /* | 631 | /* |
@@ -651,9 +666,9 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) | |||
651 | while (1) { | 666 | while (1) { |
652 | if (table) | 667 | if (table) |
653 | for (i = 0; i < table->nr; i++) | 668 | for (i = 0; i < table->nr; i++) |
654 | if (!table->table[i]) { | 669 | if (!rcu_access_pointer(table->table[i])) { |
655 | ctx->id = i; | 670 | ctx->id = i; |
656 | table->table[i] = ctx; | 671 | rcu_assign_pointer(table->table[i], ctx); |
657 | spin_unlock(&mm->ioctx_lock); | 672 | spin_unlock(&mm->ioctx_lock); |
658 | 673 | ||
659 | /* While kioctx setup is in progress, | 674 | /* While kioctx setup is in progress, |
@@ -834,11 +849,11 @@ static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx, | |||
834 | } | 849 | } |
835 | 850 | ||
836 | table = rcu_dereference_raw(mm->ioctx_table); | 851 | table = rcu_dereference_raw(mm->ioctx_table); |
837 | WARN_ON(ctx != table->table[ctx->id]); | 852 | WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id])); |
838 | table->table[ctx->id] = NULL; | 853 | RCU_INIT_POINTER(table->table[ctx->id], NULL); |
839 | spin_unlock(&mm->ioctx_lock); | 854 | spin_unlock(&mm->ioctx_lock); |
840 | 855 | ||
841 | /* percpu_ref_kill() will do the necessary call_rcu() */ | 856 | /* free_ioctx_reqs() will do the necessary RCU synchronization */ |
842 | wake_up_all(&ctx->wait); | 857 | wake_up_all(&ctx->wait); |
843 | 858 | ||
844 | /* | 859 | /* |
@@ -880,7 +895,8 @@ void exit_aio(struct mm_struct *mm) | |||
880 | 895 | ||
881 | skipped = 0; | 896 | skipped = 0; |
882 | for (i = 0; i < table->nr; ++i) { | 897 | for (i = 0; i < table->nr; ++i) { |
883 | struct kioctx *ctx = table->table[i]; | 898 | struct kioctx *ctx = |
899 | rcu_dereference_protected(table->table[i], true); | ||
884 | 900 | ||
885 | if (!ctx) { | 901 | if (!ctx) { |
886 | skipped++; | 902 | skipped++; |
@@ -1069,7 +1085,7 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id) | |||
1069 | if (!table || id >= table->nr) | 1085 | if (!table || id >= table->nr) |
1070 | goto out; | 1086 | goto out; |
1071 | 1087 | ||
1072 | ctx = table->table[id]; | 1088 | ctx = rcu_dereference(table->table[id]); |
1073 | if (ctx && ctx->user_id == ctx_id) { | 1089 | if (ctx && ctx->user_id == ctx_id) { |
1074 | percpu_ref_get(&ctx->users); | 1090 | percpu_ref_get(&ctx->users); |
1075 | ret = ctx; | 1091 | ret = ctx; |
diff --git a/fs/block_dev.c b/fs/block_dev.c index 4a181fcb5175..fe09ef9c21f3 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
@@ -1058,6 +1058,27 @@ retry: | |||
1058 | return 0; | 1058 | return 0; |
1059 | } | 1059 | } |
1060 | 1060 | ||
1061 | static struct gendisk *bdev_get_gendisk(struct block_device *bdev, int *partno) | ||
1062 | { | ||
1063 | struct gendisk *disk = get_gendisk(bdev->bd_dev, partno); | ||
1064 | |||
1065 | if (!disk) | ||
1066 | return NULL; | ||
1067 | /* | ||
1068 | * Now that we hold gendisk reference we make sure bdev we looked up is | ||
1069 | * not stale. If it is, it means device got removed and created before | ||
1070 | * we looked up gendisk and we fail open in such case. Associating | ||
1071 | * unhashed bdev with newly created gendisk could lead to two bdevs | ||
1072 | * (and thus two independent caches) being associated with one device | ||
1073 | * which is bad. | ||
1074 | */ | ||
1075 | if (inode_unhashed(bdev->bd_inode)) { | ||
1076 | put_disk_and_module(disk); | ||
1077 | return NULL; | ||
1078 | } | ||
1079 | return disk; | ||
1080 | } | ||
1081 | |||
1061 | /** | 1082 | /** |
1062 | * bd_start_claiming - start claiming a block device | 1083 | * bd_start_claiming - start claiming a block device |
1063 | * @bdev: block device of interest | 1084 | * @bdev: block device of interest |
@@ -1094,7 +1115,7 @@ static struct block_device *bd_start_claiming(struct block_device *bdev, | |||
1094 | * @bdev might not have been initialized properly yet, look up | 1115 | * @bdev might not have been initialized properly yet, look up |
1095 | * and grab the outer block device the hard way. | 1116 | * and grab the outer block device the hard way. |
1096 | */ | 1117 | */ |
1097 | disk = get_gendisk(bdev->bd_dev, &partno); | 1118 | disk = bdev_get_gendisk(bdev, &partno); |
1098 | if (!disk) | 1119 | if (!disk) |
1099 | return ERR_PTR(-ENXIO); | 1120 | return ERR_PTR(-ENXIO); |
1100 | 1121 | ||
@@ -1111,8 +1132,7 @@ static struct block_device *bd_start_claiming(struct block_device *bdev, | |||
1111 | else | 1132 | else |
1112 | whole = bdgrab(bdev); | 1133 | whole = bdgrab(bdev); |
1113 | 1134 | ||
1114 | module_put(disk->fops->owner); | 1135 | put_disk_and_module(disk); |
1115 | put_disk(disk); | ||
1116 | if (!whole) | 1136 | if (!whole) |
1117 | return ERR_PTR(-ENOMEM); | 1137 | return ERR_PTR(-ENOMEM); |
1118 | 1138 | ||
@@ -1407,10 +1427,10 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part); | |||
1407 | static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) | 1427 | static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) |
1408 | { | 1428 | { |
1409 | struct gendisk *disk; | 1429 | struct gendisk *disk; |
1410 | struct module *owner; | ||
1411 | int ret; | 1430 | int ret; |
1412 | int partno; | 1431 | int partno; |
1413 | int perm = 0; | 1432 | int perm = 0; |
1433 | bool first_open = false; | ||
1414 | 1434 | ||
1415 | if (mode & FMODE_READ) | 1435 | if (mode & FMODE_READ) |
1416 | perm |= MAY_READ; | 1436 | perm |= MAY_READ; |
@@ -1430,14 +1450,14 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) | |||
1430 | restart: | 1450 | restart: |
1431 | 1451 | ||
1432 | ret = -ENXIO; | 1452 | ret = -ENXIO; |
1433 | disk = get_gendisk(bdev->bd_dev, &partno); | 1453 | disk = bdev_get_gendisk(bdev, &partno); |
1434 | if (!disk) | 1454 | if (!disk) |
1435 | goto out; | 1455 | goto out; |
1436 | owner = disk->fops->owner; | ||
1437 | 1456 | ||
1438 | disk_block_events(disk); | 1457 | disk_block_events(disk); |
1439 | mutex_lock_nested(&bdev->bd_mutex, for_part); | 1458 | mutex_lock_nested(&bdev->bd_mutex, for_part); |
1440 | if (!bdev->bd_openers) { | 1459 | if (!bdev->bd_openers) { |
1460 | first_open = true; | ||
1441 | bdev->bd_disk = disk; | 1461 | bdev->bd_disk = disk; |
1442 | bdev->bd_queue = disk->queue; | 1462 | bdev->bd_queue = disk->queue; |
1443 | bdev->bd_contains = bdev; | 1463 | bdev->bd_contains = bdev; |
@@ -1463,8 +1483,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) | |||
1463 | bdev->bd_queue = NULL; | 1483 | bdev->bd_queue = NULL; |
1464 | mutex_unlock(&bdev->bd_mutex); | 1484 | mutex_unlock(&bdev->bd_mutex); |
1465 | disk_unblock_events(disk); | 1485 | disk_unblock_events(disk); |
1466 | put_disk(disk); | 1486 | put_disk_and_module(disk); |
1467 | module_put(owner); | ||
1468 | goto restart; | 1487 | goto restart; |
1469 | } | 1488 | } |
1470 | } | 1489 | } |
@@ -1524,15 +1543,15 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) | |||
1524 | if (ret) | 1543 | if (ret) |
1525 | goto out_unlock_bdev; | 1544 | goto out_unlock_bdev; |
1526 | } | 1545 | } |
1527 | /* only one opener holds refs to the module and disk */ | ||
1528 | put_disk(disk); | ||
1529 | module_put(owner); | ||
1530 | } | 1546 | } |
1531 | bdev->bd_openers++; | 1547 | bdev->bd_openers++; |
1532 | if (for_part) | 1548 | if (for_part) |
1533 | bdev->bd_part_count++; | 1549 | bdev->bd_part_count++; |
1534 | mutex_unlock(&bdev->bd_mutex); | 1550 | mutex_unlock(&bdev->bd_mutex); |
1535 | disk_unblock_events(disk); | 1551 | disk_unblock_events(disk); |
1552 | /* only one opener holds refs to the module and disk */ | ||
1553 | if (!first_open) | ||
1554 | put_disk_and_module(disk); | ||
1536 | return 0; | 1555 | return 0; |
1537 | 1556 | ||
1538 | out_clear: | 1557 | out_clear: |
@@ -1546,8 +1565,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) | |||
1546 | out_unlock_bdev: | 1565 | out_unlock_bdev: |
1547 | mutex_unlock(&bdev->bd_mutex); | 1566 | mutex_unlock(&bdev->bd_mutex); |
1548 | disk_unblock_events(disk); | 1567 | disk_unblock_events(disk); |
1549 | put_disk(disk); | 1568 | put_disk_and_module(disk); |
1550 | module_put(owner); | ||
1551 | out: | 1569 | out: |
1552 | bdput(bdev); | 1570 | bdput(bdev); |
1553 | 1571 | ||
@@ -1770,8 +1788,6 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part) | |||
1770 | disk->fops->release(disk, mode); | 1788 | disk->fops->release(disk, mode); |
1771 | } | 1789 | } |
1772 | if (!bdev->bd_openers) { | 1790 | if (!bdev->bd_openers) { |
1773 | struct module *owner = disk->fops->owner; | ||
1774 | |||
1775 | disk_put_part(bdev->bd_part); | 1791 | disk_put_part(bdev->bd_part); |
1776 | bdev->bd_part = NULL; | 1792 | bdev->bd_part = NULL; |
1777 | bdev->bd_disk = NULL; | 1793 | bdev->bd_disk = NULL; |
@@ -1779,8 +1795,7 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part) | |||
1779 | victim = bdev->bd_contains; | 1795 | victim = bdev->bd_contains; |
1780 | bdev->bd_contains = NULL; | 1796 | bdev->bd_contains = NULL; |
1781 | 1797 | ||
1782 | put_disk(disk); | 1798 | put_disk_and_module(disk); |
1783 | module_put(owner); | ||
1784 | } | 1799 | } |
1785 | mutex_unlock(&bdev->bd_mutex); | 1800 | mutex_unlock(&bdev->bd_mutex); |
1786 | bdput(bdev); | 1801 | bdput(bdev); |
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index f94b2d8c744a..26484648d090 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c | |||
@@ -1519,6 +1519,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr) | |||
1519 | if (!node) | 1519 | if (!node) |
1520 | break; | 1520 | break; |
1521 | bytenr = node->val; | 1521 | bytenr = node->val; |
1522 | shared.share_count = 0; | ||
1522 | cond_resched(); | 1523 | cond_resched(); |
1523 | } | 1524 | } |
1524 | 1525 | ||
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 1a462ab85c49..da308774b8a4 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
@@ -2974,7 +2974,7 @@ static inline void free_fs_info(struct btrfs_fs_info *fs_info) | |||
2974 | kfree(fs_info->super_copy); | 2974 | kfree(fs_info->super_copy); |
2975 | kfree(fs_info->super_for_commit); | 2975 | kfree(fs_info->super_for_commit); |
2976 | security_free_mnt_opts(&fs_info->security_opts); | 2976 | security_free_mnt_opts(&fs_info->security_opts); |
2977 | kfree(fs_info); | 2977 | kvfree(fs_info); |
2978 | } | 2978 | } |
2979 | 2979 | ||
2980 | /* tree mod log functions from ctree.c */ | 2980 | /* tree mod log functions from ctree.c */ |
@@ -3095,7 +3095,10 @@ btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans, | |||
3095 | u64 inode_objectid, u64 ref_objectid, int ins_len, | 3095 | u64 inode_objectid, u64 ref_objectid, int ins_len, |
3096 | int cow); | 3096 | int cow); |
3097 | 3097 | ||
3098 | int btrfs_find_name_in_ext_backref(struct btrfs_path *path, | 3098 | int btrfs_find_name_in_backref(struct extent_buffer *leaf, int slot, |
3099 | const char *name, | ||
3100 | int name_len, struct btrfs_inode_ref **ref_ret); | ||
3101 | int btrfs_find_name_in_ext_backref(struct extent_buffer *leaf, int slot, | ||
3099 | u64 ref_objectid, const char *name, | 3102 | u64 ref_objectid, const char *name, |
3100 | int name_len, | 3103 | int name_len, |
3101 | struct btrfs_inode_extref **extref_ret); | 3104 | struct btrfs_inode_extref **extref_ret); |
diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c index 39c968f80157..65e1a76bf755 100644 --- a/fs/btrfs/inode-item.c +++ b/fs/btrfs/inode-item.c | |||
@@ -22,10 +22,10 @@ | |||
22 | #include "transaction.h" | 22 | #include "transaction.h" |
23 | #include "print-tree.h" | 23 | #include "print-tree.h" |
24 | 24 | ||
25 | static int find_name_in_backref(struct btrfs_path *path, const char *name, | 25 | int btrfs_find_name_in_backref(struct extent_buffer *leaf, int slot, |
26 | int name_len, struct btrfs_inode_ref **ref_ret) | 26 | const char *name, |
27 | int name_len, struct btrfs_inode_ref **ref_ret) | ||
27 | { | 28 | { |
28 | struct extent_buffer *leaf; | ||
29 | struct btrfs_inode_ref *ref; | 29 | struct btrfs_inode_ref *ref; |
30 | unsigned long ptr; | 30 | unsigned long ptr; |
31 | unsigned long name_ptr; | 31 | unsigned long name_ptr; |
@@ -33,9 +33,8 @@ static int find_name_in_backref(struct btrfs_path *path, const char *name, | |||
33 | u32 cur_offset = 0; | 33 | u32 cur_offset = 0; |
34 | int len; | 34 | int len; |
35 | 35 | ||
36 | leaf = path->nodes[0]; | 36 | item_size = btrfs_item_size_nr(leaf, slot); |
37 | item_size = btrfs_item_size_nr(leaf, path->slots[0]); | 37 | ptr = btrfs_item_ptr_offset(leaf, slot); |
38 | ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); | ||
39 | while (cur_offset < item_size) { | 38 | while (cur_offset < item_size) { |
40 | ref = (struct btrfs_inode_ref *)(ptr + cur_offset); | 39 | ref = (struct btrfs_inode_ref *)(ptr + cur_offset); |
41 | len = btrfs_inode_ref_name_len(leaf, ref); | 40 | len = btrfs_inode_ref_name_len(leaf, ref); |
@@ -44,18 +43,19 @@ static int find_name_in_backref(struct btrfs_path *path, const char *name, | |||
44 | if (len != name_len) | 43 | if (len != name_len) |
45 | continue; | 44 | continue; |
46 | if (memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0) { | 45 | if (memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0) { |
47 | *ref_ret = ref; | 46 | if (ref_ret) |
47 | *ref_ret = ref; | ||
48 | return 1; | 48 | return 1; |
49 | } | 49 | } |
50 | } | 50 | } |
51 | return 0; | 51 | return 0; |
52 | } | 52 | } |
53 | 53 | ||
54 | int btrfs_find_name_in_ext_backref(struct btrfs_path *path, u64 ref_objectid, | 54 | int btrfs_find_name_in_ext_backref(struct extent_buffer *leaf, int slot, |
55 | u64 ref_objectid, | ||
55 | const char *name, int name_len, | 56 | const char *name, int name_len, |
56 | struct btrfs_inode_extref **extref_ret) | 57 | struct btrfs_inode_extref **extref_ret) |
57 | { | 58 | { |
58 | struct extent_buffer *leaf; | ||
59 | struct btrfs_inode_extref *extref; | 59 | struct btrfs_inode_extref *extref; |
60 | unsigned long ptr; | 60 | unsigned long ptr; |
61 | unsigned long name_ptr; | 61 | unsigned long name_ptr; |
@@ -63,9 +63,8 @@ int btrfs_find_name_in_ext_backref(struct btrfs_path *path, u64 ref_objectid, | |||
63 | u32 cur_offset = 0; | 63 | u32 cur_offset = 0; |
64 | int ref_name_len; | 64 | int ref_name_len; |
65 | 65 | ||
66 | leaf = path->nodes[0]; | 66 | item_size = btrfs_item_size_nr(leaf, slot); |
67 | item_size = btrfs_item_size_nr(leaf, path->slots[0]); | 67 | ptr = btrfs_item_ptr_offset(leaf, slot); |
68 | ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); | ||
69 | 68 | ||
70 | /* | 69 | /* |
71 | * Search all extended backrefs in this item. We're only | 70 | * Search all extended backrefs in this item. We're only |
@@ -113,7 +112,9 @@ btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans, | |||
113 | return ERR_PTR(ret); | 112 | return ERR_PTR(ret); |
114 | if (ret > 0) | 113 | if (ret > 0) |
115 | return NULL; | 114 | return NULL; |
116 | if (!btrfs_find_name_in_ext_backref(path, ref_objectid, name, name_len, &extref)) | 115 | if (!btrfs_find_name_in_ext_backref(path->nodes[0], path->slots[0], |
116 | ref_objectid, name, name_len, | ||
117 | &extref)) | ||
117 | return NULL; | 118 | return NULL; |
118 | return extref; | 119 | return extref; |
119 | } | 120 | } |
@@ -155,7 +156,8 @@ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans, | |||
155 | * This should always succeed so error here will make the FS | 156 | * This should always succeed so error here will make the FS |
156 | * readonly. | 157 | * readonly. |
157 | */ | 158 | */ |
158 | if (!btrfs_find_name_in_ext_backref(path, ref_objectid, | 159 | if (!btrfs_find_name_in_ext_backref(path->nodes[0], path->slots[0], |
160 | ref_objectid, | ||
159 | name, name_len, &extref)) { | 161 | name, name_len, &extref)) { |
160 | btrfs_handle_fs_error(root->fs_info, -ENOENT, NULL); | 162 | btrfs_handle_fs_error(root->fs_info, -ENOENT, NULL); |
161 | ret = -EROFS; | 163 | ret = -EROFS; |
@@ -225,7 +227,8 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans, | |||
225 | } else if (ret < 0) { | 227 | } else if (ret < 0) { |
226 | goto out; | 228 | goto out; |
227 | } | 229 | } |
228 | if (!find_name_in_backref(path, name, name_len, &ref)) { | 230 | if (!btrfs_find_name_in_backref(path->nodes[0], path->slots[0], |
231 | name, name_len, &ref)) { | ||
229 | ret = -ENOENT; | 232 | ret = -ENOENT; |
230 | search_ext_refs = 1; | 233 | search_ext_refs = 1; |
231 | goto out; | 234 | goto out; |
@@ -293,7 +296,9 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans, | |||
293 | ret = btrfs_insert_empty_item(trans, root, path, &key, | 296 | ret = btrfs_insert_empty_item(trans, root, path, &key, |
294 | ins_len); | 297 | ins_len); |
295 | if (ret == -EEXIST) { | 298 | if (ret == -EEXIST) { |
296 | if (btrfs_find_name_in_ext_backref(path, ref_objectid, | 299 | if (btrfs_find_name_in_ext_backref(path->nodes[0], |
300 | path->slots[0], | ||
301 | ref_objectid, | ||
297 | name, name_len, NULL)) | 302 | name, name_len, NULL)) |
298 | goto out; | 303 | goto out; |
299 | 304 | ||
@@ -351,7 +356,8 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans, | |||
351 | if (ret == -EEXIST) { | 356 | if (ret == -EEXIST) { |
352 | u32 old_size; | 357 | u32 old_size; |
353 | 358 | ||
354 | if (find_name_in_backref(path, name, name_len, &ref)) | 359 | if (btrfs_find_name_in_backref(path->nodes[0], path->slots[0], |
360 | name, name_len, &ref)) | ||
355 | goto out; | 361 | goto out; |
356 | 362 | ||
357 | old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); | 363 | old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); |
@@ -365,7 +371,9 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans, | |||
365 | ret = 0; | 371 | ret = 0; |
366 | } else if (ret < 0) { | 372 | } else if (ret < 0) { |
367 | if (ret == -EOVERFLOW) { | 373 | if (ret == -EOVERFLOW) { |
368 | if (find_name_in_backref(path, name, name_len, &ref)) | 374 | if (btrfs_find_name_in_backref(path->nodes[0], |
375 | path->slots[0], | ||
376 | name, name_len, &ref)) | ||
369 | ret = -EEXIST; | 377 | ret = -EEXIST; |
370 | else | 378 | else |
371 | ret = -EMLINK; | 379 | ret = -EMLINK; |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index a79299a89b7d..f53470112670 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -2043,12 +2043,15 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans, | |||
2043 | struct inode *inode, struct list_head *list) | 2043 | struct inode *inode, struct list_head *list) |
2044 | { | 2044 | { |
2045 | struct btrfs_ordered_sum *sum; | 2045 | struct btrfs_ordered_sum *sum; |
2046 | int ret; | ||
2046 | 2047 | ||
2047 | list_for_each_entry(sum, list, list) { | 2048 | list_for_each_entry(sum, list, list) { |
2048 | trans->adding_csums = true; | 2049 | trans->adding_csums = true; |
2049 | btrfs_csum_file_blocks(trans, | 2050 | ret = btrfs_csum_file_blocks(trans, |
2050 | BTRFS_I(inode)->root->fs_info->csum_root, sum); | 2051 | BTRFS_I(inode)->root->fs_info->csum_root, sum); |
2051 | trans->adding_csums = false; | 2052 | trans->adding_csums = false; |
2053 | if (ret) | ||
2054 | return ret; | ||
2052 | } | 2055 | } |
2053 | return 0; | 2056 | return 0; |
2054 | } | 2057 | } |
@@ -3062,7 +3065,11 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) | |||
3062 | goto out; | 3065 | goto out; |
3063 | } | 3066 | } |
3064 | 3067 | ||
3065 | add_pending_csums(trans, inode, &ordered_extent->list); | 3068 | ret = add_pending_csums(trans, inode, &ordered_extent->list); |
3069 | if (ret) { | ||
3070 | btrfs_abort_transaction(trans, ret); | ||
3071 | goto out; | ||
3072 | } | ||
3066 | 3073 | ||
3067 | btrfs_ordered_update_i_size(inode, 0, ordered_extent); | 3074 | btrfs_ordered_update_i_size(inode, 0, ordered_extent); |
3068 | ret = btrfs_update_inode_fallback(trans, root, inode); | 3075 | ret = btrfs_update_inode_fallback(trans, root, inode); |
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c index dec0907dfb8a..fcfc20de2df3 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c | |||
@@ -1370,6 +1370,7 @@ static int find_bio_stripe(struct btrfs_raid_bio *rbio, | |||
1370 | stripe_start = stripe->physical; | 1370 | stripe_start = stripe->physical; |
1371 | if (physical >= stripe_start && | 1371 | if (physical >= stripe_start && |
1372 | physical < stripe_start + rbio->stripe_len && | 1372 | physical < stripe_start + rbio->stripe_len && |
1373 | stripe->dev->bdev && | ||
1373 | bio->bi_disk == stripe->dev->bdev->bd_disk && | 1374 | bio->bi_disk == stripe->dev->bdev->bd_disk && |
1374 | bio->bi_partno == stripe->dev->bdev->bd_partno) { | 1375 | bio->bi_partno == stripe->dev->bdev->bd_partno) { |
1375 | return i; | 1376 | return i; |
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index f0c3f00e97cb..cd2298d185dd 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c | |||
@@ -3268,8 +3268,22 @@ static int relocate_file_extent_cluster(struct inode *inode, | |||
3268 | nr++; | 3268 | nr++; |
3269 | } | 3269 | } |
3270 | 3270 | ||
3271 | btrfs_set_extent_delalloc(inode, page_start, page_end, 0, NULL, | 3271 | ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0, |
3272 | 0); | 3272 | NULL, 0); |
3273 | if (ret) { | ||
3274 | unlock_page(page); | ||
3275 | put_page(page); | ||
3276 | btrfs_delalloc_release_metadata(BTRFS_I(inode), | ||
3277 | PAGE_SIZE); | ||
3278 | btrfs_delalloc_release_extents(BTRFS_I(inode), | ||
3279 | PAGE_SIZE); | ||
3280 | |||
3281 | clear_extent_bits(&BTRFS_I(inode)->io_tree, | ||
3282 | page_start, page_end, | ||
3283 | EXTENT_LOCKED | EXTENT_BOUNDARY); | ||
3284 | goto out; | ||
3285 | |||
3286 | } | ||
3273 | set_page_dirty(page); | 3287 | set_page_dirty(page); |
3274 | 3288 | ||
3275 | unlock_extent(&BTRFS_I(inode)->io_tree, | 3289 | unlock_extent(&BTRFS_I(inode)->io_tree, |
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index f306c608dc28..484e2af793de 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c | |||
@@ -5005,6 +5005,9 @@ static int send_hole(struct send_ctx *sctx, u64 end) | |||
5005 | u64 len; | 5005 | u64 len; |
5006 | int ret = 0; | 5006 | int ret = 0; |
5007 | 5007 | ||
5008 | if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA) | ||
5009 | return send_update_extent(sctx, offset, end - offset); | ||
5010 | |||
5008 | p = fs_path_alloc(); | 5011 | p = fs_path_alloc(); |
5009 | if (!p) | 5012 | if (!p) |
5010 | return -ENOMEM; | 5013 | return -ENOMEM; |
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 6e71a2a78363..4b817947e00f 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c | |||
@@ -1545,7 +1545,7 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type, | |||
1545 | * it for searching for existing supers, so this lets us do that and | 1545 | * it for searching for existing supers, so this lets us do that and |
1546 | * then open_ctree will properly initialize everything later. | 1546 | * then open_ctree will properly initialize everything later. |
1547 | */ | 1547 | */ |
1548 | fs_info = kzalloc(sizeof(struct btrfs_fs_info), GFP_KERNEL); | 1548 | fs_info = kvzalloc(sizeof(struct btrfs_fs_info), GFP_KERNEL); |
1549 | if (!fs_info) { | 1549 | if (!fs_info) { |
1550 | error = -ENOMEM; | 1550 | error = -ENOMEM; |
1551 | goto error_sec_opts; | 1551 | goto error_sec_opts; |
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 4fd19b4d6675..434457794c27 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c | |||
@@ -967,7 +967,9 @@ static noinline int backref_in_log(struct btrfs_root *log, | |||
967 | ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); | 967 | ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); |
968 | 968 | ||
969 | if (key->type == BTRFS_INODE_EXTREF_KEY) { | 969 | if (key->type == BTRFS_INODE_EXTREF_KEY) { |
970 | if (btrfs_find_name_in_ext_backref(path, ref_objectid, | 970 | if (btrfs_find_name_in_ext_backref(path->nodes[0], |
971 | path->slots[0], | ||
972 | ref_objectid, | ||
971 | name, namelen, NULL)) | 973 | name, namelen, NULL)) |
972 | match = 1; | 974 | match = 1; |
973 | 975 | ||
@@ -1191,7 +1193,8 @@ static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr, | |||
1191 | read_extent_buffer(eb, *name, (unsigned long)&extref->name, | 1193 | read_extent_buffer(eb, *name, (unsigned long)&extref->name, |
1192 | *namelen); | 1194 | *namelen); |
1193 | 1195 | ||
1194 | *index = btrfs_inode_extref_index(eb, extref); | 1196 | if (index) |
1197 | *index = btrfs_inode_extref_index(eb, extref); | ||
1195 | if (parent_objectid) | 1198 | if (parent_objectid) |
1196 | *parent_objectid = btrfs_inode_extref_parent(eb, extref); | 1199 | *parent_objectid = btrfs_inode_extref_parent(eb, extref); |
1197 | 1200 | ||
@@ -1212,12 +1215,102 @@ static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr, | |||
1212 | 1215 | ||
1213 | read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen); | 1216 | read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen); |
1214 | 1217 | ||
1215 | *index = btrfs_inode_ref_index(eb, ref); | 1218 | if (index) |
1219 | *index = btrfs_inode_ref_index(eb, ref); | ||
1216 | 1220 | ||
1217 | return 0; | 1221 | return 0; |
1218 | } | 1222 | } |
1219 | 1223 | ||
1220 | /* | 1224 | /* |
1225 | * Take an inode reference item from the log tree and iterate all names from the | ||
1226 | * inode reference item in the subvolume tree with the same key (if it exists). | ||
1227 | * For any name that is not in the inode reference item from the log tree, do a | ||
1228 | * proper unlink of that name (that is, remove its entry from the inode | ||
1229 | * reference item and both dir index keys). | ||
1230 | */ | ||
1231 | static int unlink_old_inode_refs(struct btrfs_trans_handle *trans, | ||
1232 | struct btrfs_root *root, | ||
1233 | struct btrfs_path *path, | ||
1234 | struct btrfs_inode *inode, | ||
1235 | struct extent_buffer *log_eb, | ||
1236 | int log_slot, | ||
1237 | struct btrfs_key *key) | ||
1238 | { | ||
1239 | int ret; | ||
1240 | unsigned long ref_ptr; | ||
1241 | unsigned long ref_end; | ||
1242 | struct extent_buffer *eb; | ||
1243 | |||
1244 | again: | ||
1245 | btrfs_release_path(path); | ||
1246 | ret = btrfs_search_slot(NULL, root, key, path, 0, 0); | ||
1247 | if (ret > 0) { | ||
1248 | ret = 0; | ||
1249 | goto out; | ||
1250 | } | ||
1251 | if (ret < 0) | ||
1252 | goto out; | ||
1253 | |||
1254 | eb = path->nodes[0]; | ||
1255 | ref_ptr = btrfs_item_ptr_offset(eb, path->slots[0]); | ||
1256 | ref_end = ref_ptr + btrfs_item_size_nr(eb, path->slots[0]); | ||
1257 | while (ref_ptr < ref_end) { | ||
1258 | char *name = NULL; | ||
1259 | int namelen; | ||
1260 | u64 parent_id; | ||
1261 | |||
1262 | if (key->type == BTRFS_INODE_EXTREF_KEY) { | ||
1263 | ret = extref_get_fields(eb, ref_ptr, &namelen, &name, | ||
1264 | NULL, &parent_id); | ||
1265 | } else { | ||
1266 | parent_id = key->offset; | ||
1267 | ret = ref_get_fields(eb, ref_ptr, &namelen, &name, | ||
1268 | NULL); | ||
1269 | } | ||
1270 | if (ret) | ||
1271 | goto out; | ||
1272 | |||
1273 | if (key->type == BTRFS_INODE_EXTREF_KEY) | ||
1274 | ret = btrfs_find_name_in_ext_backref(log_eb, log_slot, | ||
1275 | parent_id, name, | ||
1276 | namelen, NULL); | ||
1277 | else | ||
1278 | ret = btrfs_find_name_in_backref(log_eb, log_slot, name, | ||
1279 | namelen, NULL); | ||
1280 | |||
1281 | if (!ret) { | ||
1282 | struct inode *dir; | ||
1283 | |||
1284 | btrfs_release_path(path); | ||
1285 | dir = read_one_inode(root, parent_id); | ||
1286 | if (!dir) { | ||
1287 | ret = -ENOENT; | ||
1288 | kfree(name); | ||
1289 | goto out; | ||
1290 | } | ||
1291 | ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir), | ||
1292 | inode, name, namelen); | ||
1293 | kfree(name); | ||
1294 | iput(dir); | ||
1295 | if (ret) | ||
1296 | goto out; | ||
1297 | goto again; | ||
1298 | } | ||
1299 | |||
1300 | kfree(name); | ||
1301 | ref_ptr += namelen; | ||
1302 | if (key->type == BTRFS_INODE_EXTREF_KEY) | ||
1303 | ref_ptr += sizeof(struct btrfs_inode_extref); | ||
1304 | else | ||
1305 | ref_ptr += sizeof(struct btrfs_inode_ref); | ||
1306 | } | ||
1307 | ret = 0; | ||
1308 | out: | ||
1309 | btrfs_release_path(path); | ||
1310 | return ret; | ||
1311 | } | ||
1312 | |||
1313 | /* | ||
1221 | * replay one inode back reference item found in the log tree. | 1314 | * replay one inode back reference item found in the log tree. |
1222 | * eb, slot and key refer to the buffer and key found in the log tree. | 1315 | * eb, slot and key refer to the buffer and key found in the log tree. |
1223 | * root is the destination we are replaying into, and path is for temp | 1316 | * root is the destination we are replaying into, and path is for temp |
@@ -1345,6 +1438,19 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, | |||
1345 | } | 1438 | } |
1346 | } | 1439 | } |
1347 | 1440 | ||
1441 | /* | ||
1442 | * Before we overwrite the inode reference item in the subvolume tree | ||
1443 | * with the item from the log tree, we must unlink all names from the | ||
1444 | * parent directory that are in the subvolume's tree inode reference | ||
1445 | * item, otherwise we end up with an inconsistent subvolume tree where | ||
1446 | * dir index entries exist for a name but there is no inode reference | ||
1447 | * item with the same name. | ||
1448 | */ | ||
1449 | ret = unlink_old_inode_refs(trans, root, path, BTRFS_I(inode), eb, slot, | ||
1450 | key); | ||
1451 | if (ret) | ||
1452 | goto out; | ||
1453 | |||
1348 | /* finally write the back reference in the inode */ | 1454 | /* finally write the back reference in the inode */ |
1349 | ret = overwrite_item(trans, root, path, eb, slot, key); | 1455 | ret = overwrite_item(trans, root, path, eb, slot, key); |
1350 | out: | 1456 | out: |
@@ -5853,7 +5959,7 @@ int btrfs_log_new_name(struct btrfs_trans_handle *trans, | |||
5853 | * this will force the logging code to walk the dentry chain | 5959 | * this will force the logging code to walk the dentry chain |
5854 | * up for the file | 5960 | * up for the file |
5855 | */ | 5961 | */ |
5856 | if (S_ISREG(inode->vfs_inode.i_mode)) | 5962 | if (!S_ISDIR(inode->vfs_inode.i_mode)) |
5857 | inode->last_unlink_trans = trans->transid; | 5963 | inode->last_unlink_trans = trans->transid; |
5858 | 5964 | ||
5859 | /* | 5965 | /* |
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 2ceb924ca0d6..b2d05c6b1c56 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
@@ -4829,10 +4829,13 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, | |||
4829 | ndevs = min(ndevs, devs_max); | 4829 | ndevs = min(ndevs, devs_max); |
4830 | 4830 | ||
4831 | /* | 4831 | /* |
4832 | * the primary goal is to maximize the number of stripes, so use as many | 4832 | * The primary goal is to maximize the number of stripes, so use as |
4833 | * devices as possible, even if the stripes are not maximum sized. | 4833 | * many devices as possible, even if the stripes are not maximum sized. |
4834 | * | ||
4835 | * The DUP profile stores more than one stripe per device, the | ||
4836 | * max_avail is the total size so we have to adjust. | ||
4834 | */ | 4837 | */ |
4835 | stripe_size = devices_info[ndevs-1].max_avail; | 4838 | stripe_size = div_u64(devices_info[ndevs - 1].max_avail, dev_stripes); |
4836 | num_stripes = ndevs * dev_stripes; | 4839 | num_stripes = ndevs * dev_stripes; |
4837 | 4840 | ||
4838 | /* | 4841 | /* |
@@ -4867,8 +4870,6 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, | |||
4867 | stripe_size = devices_info[ndevs-1].max_avail; | 4870 | stripe_size = devices_info[ndevs-1].max_avail; |
4868 | } | 4871 | } |
4869 | 4872 | ||
4870 | stripe_size = div_u64(stripe_size, dev_stripes); | ||
4871 | |||
4872 | /* align to BTRFS_STRIPE_LEN */ | 4873 | /* align to BTRFS_STRIPE_LEN */ |
4873 | stripe_size = round_down(stripe_size, BTRFS_STRIPE_LEN); | 4874 | stripe_size = round_down(stripe_size, BTRFS_STRIPE_LEN); |
4874 | 4875 | ||
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 6582c4507e6c..0e5bd3e3344e 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c | |||
@@ -3965,6 +3965,32 @@ void ceph_put_fmode(struct ceph_inode_info *ci, int fmode) | |||
3965 | } | 3965 | } |
3966 | 3966 | ||
3967 | /* | 3967 | /* |
3968 | * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps. If it | ||
3969 | * looks like the link count will hit 0, drop any other caps (other | ||
3970 | * than PIN) we don't specifically want (due to the file still being | ||
3971 | * open). | ||
3972 | */ | ||
3973 | int ceph_drop_caps_for_unlink(struct inode *inode) | ||
3974 | { | ||
3975 | struct ceph_inode_info *ci = ceph_inode(inode); | ||
3976 | int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL; | ||
3977 | |||
3978 | spin_lock(&ci->i_ceph_lock); | ||
3979 | if (inode->i_nlink == 1) { | ||
3980 | drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN); | ||
3981 | |||
3982 | ci->i_ceph_flags |= CEPH_I_NODELAY; | ||
3983 | if (__ceph_caps_dirty(ci)) { | ||
3984 | struct ceph_mds_client *mdsc = | ||
3985 | ceph_inode_to_client(inode)->mdsc; | ||
3986 | __cap_delay_requeue_front(mdsc, ci); | ||
3987 | } | ||
3988 | } | ||
3989 | spin_unlock(&ci->i_ceph_lock); | ||
3990 | return drop; | ||
3991 | } | ||
3992 | |||
3993 | /* | ||
3968 | * Helpers for embedding cap and dentry lease releases into mds | 3994 | * Helpers for embedding cap and dentry lease releases into mds |
3969 | * requests. | 3995 | * requests. |
3970 | * | 3996 | * |
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 0c4346806e17..f1d9c6cc0491 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c | |||
@@ -1003,26 +1003,6 @@ static int ceph_link(struct dentry *old_dentry, struct inode *dir, | |||
1003 | } | 1003 | } |
1004 | 1004 | ||
1005 | /* | 1005 | /* |
1006 | * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps. If it | ||
1007 | * looks like the link count will hit 0, drop any other caps (other | ||
1008 | * than PIN) we don't specifically want (due to the file still being | ||
1009 | * open). | ||
1010 | */ | ||
1011 | static int drop_caps_for_unlink(struct inode *inode) | ||
1012 | { | ||
1013 | struct ceph_inode_info *ci = ceph_inode(inode); | ||
1014 | int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL; | ||
1015 | |||
1016 | spin_lock(&ci->i_ceph_lock); | ||
1017 | if (inode->i_nlink == 1) { | ||
1018 | drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN); | ||
1019 | ci->i_ceph_flags |= CEPH_I_NODELAY; | ||
1020 | } | ||
1021 | spin_unlock(&ci->i_ceph_lock); | ||
1022 | return drop; | ||
1023 | } | ||
1024 | |||
1025 | /* | ||
1026 | * rmdir and unlink are differ only by the metadata op code | 1006 | * rmdir and unlink are differ only by the metadata op code |
1027 | */ | 1007 | */ |
1028 | static int ceph_unlink(struct inode *dir, struct dentry *dentry) | 1008 | static int ceph_unlink(struct inode *dir, struct dentry *dentry) |
@@ -1056,7 +1036,7 @@ static int ceph_unlink(struct inode *dir, struct dentry *dentry) | |||
1056 | set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags); | 1036 | set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags); |
1057 | req->r_dentry_drop = CEPH_CAP_FILE_SHARED; | 1037 | req->r_dentry_drop = CEPH_CAP_FILE_SHARED; |
1058 | req->r_dentry_unless = CEPH_CAP_FILE_EXCL; | 1038 | req->r_dentry_unless = CEPH_CAP_FILE_EXCL; |
1059 | req->r_inode_drop = drop_caps_for_unlink(inode); | 1039 | req->r_inode_drop = ceph_drop_caps_for_unlink(inode); |
1060 | err = ceph_mdsc_do_request(mdsc, dir, req); | 1040 | err = ceph_mdsc_do_request(mdsc, dir, req); |
1061 | if (!err && !req->r_reply_info.head->is_dentry) | 1041 | if (!err && !req->r_reply_info.head->is_dentry) |
1062 | d_delete(dentry); | 1042 | d_delete(dentry); |
@@ -1104,8 +1084,10 @@ static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
1104 | req->r_dentry_unless = CEPH_CAP_FILE_EXCL; | 1084 | req->r_dentry_unless = CEPH_CAP_FILE_EXCL; |
1105 | /* release LINK_RDCACHE on source inode (mds will lock it) */ | 1085 | /* release LINK_RDCACHE on source inode (mds will lock it) */ |
1106 | req->r_old_inode_drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL; | 1086 | req->r_old_inode_drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL; |
1107 | if (d_really_is_positive(new_dentry)) | 1087 | if (d_really_is_positive(new_dentry)) { |
1108 | req->r_inode_drop = drop_caps_for_unlink(d_inode(new_dentry)); | 1088 | req->r_inode_drop = |
1089 | ceph_drop_caps_for_unlink(d_inode(new_dentry)); | ||
1090 | } | ||
1109 | err = ceph_mdsc_do_request(mdsc, old_dir, req); | 1091 | err = ceph_mdsc_do_request(mdsc, old_dir, req); |
1110 | if (!err && !req->r_reply_info.head->is_dentry) { | 1092 | if (!err && !req->r_reply_info.head->is_dentry) { |
1111 | /* | 1093 | /* |
diff --git a/fs/ceph/super.c b/fs/ceph/super.c index a62d2a9841dc..fb2bc9c15a23 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c | |||
@@ -225,6 +225,7 @@ static int parse_fsopt_token(char *c, void *private) | |||
225 | return -ENOMEM; | 225 | return -ENOMEM; |
226 | break; | 226 | break; |
227 | case Opt_mds_namespace: | 227 | case Opt_mds_namespace: |
228 | kfree(fsopt->mds_namespace); | ||
228 | fsopt->mds_namespace = kstrndup(argstr[0].from, | 229 | fsopt->mds_namespace = kstrndup(argstr[0].from, |
229 | argstr[0].to-argstr[0].from, | 230 | argstr[0].to-argstr[0].from, |
230 | GFP_KERNEL); | 231 | GFP_KERNEL); |
@@ -232,6 +233,7 @@ static int parse_fsopt_token(char *c, void *private) | |||
232 | return -ENOMEM; | 233 | return -ENOMEM; |
233 | break; | 234 | break; |
234 | case Opt_fscache_uniq: | 235 | case Opt_fscache_uniq: |
236 | kfree(fsopt->fscache_uniq); | ||
235 | fsopt->fscache_uniq = kstrndup(argstr[0].from, | 237 | fsopt->fscache_uniq = kstrndup(argstr[0].from, |
236 | argstr[0].to-argstr[0].from, | 238 | argstr[0].to-argstr[0].from, |
237 | GFP_KERNEL); | 239 | GFP_KERNEL); |
@@ -711,14 +713,17 @@ static int __init init_caches(void) | |||
711 | goto bad_dentry; | 713 | goto bad_dentry; |
712 | 714 | ||
713 | ceph_file_cachep = KMEM_CACHE(ceph_file_info, SLAB_MEM_SPREAD); | 715 | ceph_file_cachep = KMEM_CACHE(ceph_file_info, SLAB_MEM_SPREAD); |
714 | |||
715 | if (!ceph_file_cachep) | 716 | if (!ceph_file_cachep) |
716 | goto bad_file; | 717 | goto bad_file; |
717 | 718 | ||
718 | if ((error = ceph_fscache_register())) | 719 | error = ceph_fscache_register(); |
719 | goto bad_file; | 720 | if (error) |
721 | goto bad_fscache; | ||
720 | 722 | ||
721 | return 0; | 723 | return 0; |
724 | |||
725 | bad_fscache: | ||
726 | kmem_cache_destroy(ceph_file_cachep); | ||
722 | bad_file: | 727 | bad_file: |
723 | kmem_cache_destroy(ceph_dentry_cachep); | 728 | kmem_cache_destroy(ceph_dentry_cachep); |
724 | bad_dentry: | 729 | bad_dentry: |
@@ -836,7 +841,6 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc) | |||
836 | int err; | 841 | int err; |
837 | unsigned long started = jiffies; /* note the start time */ | 842 | unsigned long started = jiffies; /* note the start time */ |
838 | struct dentry *root; | 843 | struct dentry *root; |
839 | int first = 0; /* first vfsmount for this super_block */ | ||
840 | 844 | ||
841 | dout("mount start %p\n", fsc); | 845 | dout("mount start %p\n", fsc); |
842 | mutex_lock(&fsc->client->mount_mutex); | 846 | mutex_lock(&fsc->client->mount_mutex); |
@@ -861,17 +865,17 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc) | |||
861 | path = fsc->mount_options->server_path + 1; | 865 | path = fsc->mount_options->server_path + 1; |
862 | dout("mount opening path %s\n", path); | 866 | dout("mount opening path %s\n", path); |
863 | } | 867 | } |
868 | |||
869 | err = ceph_fs_debugfs_init(fsc); | ||
870 | if (err < 0) | ||
871 | goto out; | ||
872 | |||
864 | root = open_root_dentry(fsc, path, started); | 873 | root = open_root_dentry(fsc, path, started); |
865 | if (IS_ERR(root)) { | 874 | if (IS_ERR(root)) { |
866 | err = PTR_ERR(root); | 875 | err = PTR_ERR(root); |
867 | goto out; | 876 | goto out; |
868 | } | 877 | } |
869 | fsc->sb->s_root = dget(root); | 878 | fsc->sb->s_root = dget(root); |
870 | first = 1; | ||
871 | |||
872 | err = ceph_fs_debugfs_init(fsc); | ||
873 | if (err < 0) | ||
874 | goto fail; | ||
875 | } else { | 879 | } else { |
876 | root = dget(fsc->sb->s_root); | 880 | root = dget(fsc->sb->s_root); |
877 | } | 881 | } |
@@ -881,11 +885,6 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc) | |||
881 | mutex_unlock(&fsc->client->mount_mutex); | 885 | mutex_unlock(&fsc->client->mount_mutex); |
882 | return root; | 886 | return root; |
883 | 887 | ||
884 | fail: | ||
885 | if (first) { | ||
886 | dput(fsc->sb->s_root); | ||
887 | fsc->sb->s_root = NULL; | ||
888 | } | ||
889 | out: | 888 | out: |
890 | mutex_unlock(&fsc->client->mount_mutex); | 889 | mutex_unlock(&fsc->client->mount_mutex); |
891 | return ERR_PTR(err); | 890 | return ERR_PTR(err); |
diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 21b2e5b004eb..1c2086e0fec2 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h | |||
@@ -987,7 +987,7 @@ extern void ceph_check_caps(struct ceph_inode_info *ci, int flags, | |||
987 | struct ceph_mds_session *session); | 987 | struct ceph_mds_session *session); |
988 | extern void ceph_check_delayed_caps(struct ceph_mds_client *mdsc); | 988 | extern void ceph_check_delayed_caps(struct ceph_mds_client *mdsc); |
989 | extern void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc); | 989 | extern void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc); |
990 | 990 | extern int ceph_drop_caps_for_unlink(struct inode *inode); | |
991 | extern int ceph_encode_inode_release(void **p, struct inode *inode, | 991 | extern int ceph_encode_inode_release(void **p, struct inode *inode, |
992 | int mds, int drop, int unless, int force); | 992 | int mds, int drop, int unless, int force); |
993 | extern int ceph_encode_dentry_release(void **p, struct dentry *dn, | 993 | extern int ceph_encode_dentry_release(void **p, struct dentry *dn, |
diff --git a/fs/dcache.c b/fs/dcache.c index 7c38f39958bc..8945e6cabd93 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
@@ -647,11 +647,16 @@ again: | |||
647 | spin_unlock(&parent->d_lock); | 647 | spin_unlock(&parent->d_lock); |
648 | goto again; | 648 | goto again; |
649 | } | 649 | } |
650 | rcu_read_unlock(); | 650 | if (parent != dentry) { |
651 | if (parent != dentry) | ||
652 | spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); | 651 | spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); |
653 | else | 652 | if (unlikely(dentry->d_lockref.count < 0)) { |
653 | spin_unlock(&parent->d_lock); | ||
654 | parent = NULL; | ||
655 | } | ||
656 | } else { | ||
654 | parent = NULL; | 657 | parent = NULL; |
658 | } | ||
659 | rcu_read_unlock(); | ||
655 | return parent; | 660 | return parent; |
656 | } | 661 | } |
657 | 662 | ||
@@ -2474,7 +2479,7 @@ struct dentry *d_alloc_parallel(struct dentry *parent, | |||
2474 | 2479 | ||
2475 | retry: | 2480 | retry: |
2476 | rcu_read_lock(); | 2481 | rcu_read_lock(); |
2477 | seq = smp_load_acquire(&parent->d_inode->i_dir_seq) & ~1; | 2482 | seq = smp_load_acquire(&parent->d_inode->i_dir_seq); |
2478 | r_seq = read_seqbegin(&rename_lock); | 2483 | r_seq = read_seqbegin(&rename_lock); |
2479 | dentry = __d_lookup_rcu(parent, name, &d_seq); | 2484 | dentry = __d_lookup_rcu(parent, name, &d_seq); |
2480 | if (unlikely(dentry)) { | 2485 | if (unlikely(dentry)) { |
@@ -2495,8 +2500,14 @@ retry: | |||
2495 | rcu_read_unlock(); | 2500 | rcu_read_unlock(); |
2496 | goto retry; | 2501 | goto retry; |
2497 | } | 2502 | } |
2503 | |||
2504 | if (unlikely(seq & 1)) { | ||
2505 | rcu_read_unlock(); | ||
2506 | goto retry; | ||
2507 | } | ||
2508 | |||
2498 | hlist_bl_lock(b); | 2509 | hlist_bl_lock(b); |
2499 | if (unlikely(parent->d_inode->i_dir_seq != seq)) { | 2510 | if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) { |
2500 | hlist_bl_unlock(b); | 2511 | hlist_bl_unlock(b); |
2501 | rcu_read_unlock(); | 2512 | rcu_read_unlock(); |
2502 | goto retry; | 2513 | goto retry; |
diff --git a/fs/direct-io.c b/fs/direct-io.c index a0ca9e48e993..1357ef563893 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c | |||
@@ -1274,8 +1274,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, | |||
1274 | */ | 1274 | */ |
1275 | if (dio->is_async && iov_iter_rw(iter) == WRITE) { | 1275 | if (dio->is_async && iov_iter_rw(iter) == WRITE) { |
1276 | retval = 0; | 1276 | retval = 0; |
1277 | if ((iocb->ki_filp->f_flags & O_DSYNC) || | 1277 | if (iocb->ki_flags & IOCB_DSYNC) |
1278 | IS_SYNC(iocb->ki_filp->f_mapping->host)) | ||
1279 | retval = dio_set_defer_completion(dio); | 1278 | retval = dio_set_defer_completion(dio); |
1280 | else if (!dio->inode->i_sb->s_dio_done_wq) { | 1279 | else if (!dio->inode->i_sb->s_dio_done_wq) { |
1281 | /* | 1280 | /* |
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index 86d6a4435c87..51f940e76c5e 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c | |||
@@ -807,9 +807,6 @@ do_alloc: | |||
807 | iomap->length = hole_size(inode, lblock, &mp); | 807 | iomap->length = hole_size(inode, lblock, &mp); |
808 | else | 808 | else |
809 | iomap->length = size - pos; | 809 | iomap->length = size - pos; |
810 | } else { | ||
811 | if (height <= ip->i_height) | ||
812 | iomap->length = hole_size(inode, lblock, &mp); | ||
813 | } | 810 | } |
814 | goto out_release; | 811 | goto out_release; |
815 | } | 812 | } |
diff --git a/fs/namei.c b/fs/namei.c index 921ae32dbc80..cafa365eeb70 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -559,9 +559,10 @@ static int __nd_alloc_stack(struct nameidata *nd) | |||
559 | static bool path_connected(const struct path *path) | 559 | static bool path_connected(const struct path *path) |
560 | { | 560 | { |
561 | struct vfsmount *mnt = path->mnt; | 561 | struct vfsmount *mnt = path->mnt; |
562 | struct super_block *sb = mnt->mnt_sb; | ||
562 | 563 | ||
563 | /* Only bind mounts can have disconnected paths */ | 564 | /* Bind mounts and multi-root filesystems can have disconnected paths */ |
564 | if (mnt->mnt_root == mnt->mnt_sb->s_root) | 565 | if (!(sb->s_iflags & SB_I_MULTIROOT) && (mnt->mnt_root == sb->s_root)) |
565 | return true; | 566 | return true; |
566 | 567 | ||
567 | return is_subdir(path->dentry, mnt->mnt_root); | 568 | return is_subdir(path->dentry, mnt->mnt_root); |
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 8c10b0562e75..621c517b325c 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c | |||
@@ -86,10 +86,10 @@ struct nfs_direct_req { | |||
86 | struct nfs_direct_mirror mirrors[NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX]; | 86 | struct nfs_direct_mirror mirrors[NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX]; |
87 | int mirror_count; | 87 | int mirror_count; |
88 | 88 | ||
89 | loff_t io_start; /* Start offset for I/O */ | ||
89 | ssize_t count, /* bytes actually processed */ | 90 | ssize_t count, /* bytes actually processed */ |
90 | max_count, /* max expected count */ | 91 | max_count, /* max expected count */ |
91 | bytes_left, /* bytes left to be sent */ | 92 | bytes_left, /* bytes left to be sent */ |
92 | io_start, /* start of IO */ | ||
93 | error; /* any reported error */ | 93 | error; /* any reported error */ |
94 | struct completion completion; /* wait for i/o completion */ | 94 | struct completion completion; /* wait for i/o completion */ |
95 | 95 | ||
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index c13e826614b5..ee723aa153a3 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c | |||
@@ -292,8 +292,11 @@ pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo) | |||
292 | void | 292 | void |
293 | pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo) | 293 | pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo) |
294 | { | 294 | { |
295 | struct inode *inode = lo->plh_inode; | 295 | struct inode *inode; |
296 | 296 | ||
297 | if (!lo) | ||
298 | return; | ||
299 | inode = lo->plh_inode; | ||
297 | pnfs_layoutreturn_before_put_layout_hdr(lo); | 300 | pnfs_layoutreturn_before_put_layout_hdr(lo); |
298 | 301 | ||
299 | if (refcount_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) { | 302 | if (refcount_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) { |
@@ -1241,10 +1244,12 @@ retry: | |||
1241 | spin_lock(&ino->i_lock); | 1244 | spin_lock(&ino->i_lock); |
1242 | lo = nfsi->layout; | 1245 | lo = nfsi->layout; |
1243 | if (!lo || !pnfs_layout_is_valid(lo) || | 1246 | if (!lo || !pnfs_layout_is_valid(lo) || |
1244 | test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) | 1247 | test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) { |
1248 | lo = NULL; | ||
1245 | goto out_noroc; | 1249 | goto out_noroc; |
1250 | } | ||
1251 | pnfs_get_layout_hdr(lo); | ||
1246 | if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) { | 1252 | if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) { |
1247 | pnfs_get_layout_hdr(lo); | ||
1248 | spin_unlock(&ino->i_lock); | 1253 | spin_unlock(&ino->i_lock); |
1249 | wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN, | 1254 | wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN, |
1250 | TASK_UNINTERRUPTIBLE); | 1255 | TASK_UNINTERRUPTIBLE); |
@@ -1312,10 +1317,12 @@ out_noroc: | |||
1312 | struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld; | 1317 | struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld; |
1313 | if (ld->prepare_layoutreturn) | 1318 | if (ld->prepare_layoutreturn) |
1314 | ld->prepare_layoutreturn(args); | 1319 | ld->prepare_layoutreturn(args); |
1320 | pnfs_put_layout_hdr(lo); | ||
1315 | return true; | 1321 | return true; |
1316 | } | 1322 | } |
1317 | if (layoutreturn) | 1323 | if (layoutreturn) |
1318 | pnfs_send_layoutreturn(lo, &stateid, iomode, true); | 1324 | pnfs_send_layoutreturn(lo, &stateid, iomode, true); |
1325 | pnfs_put_layout_hdr(lo); | ||
1319 | return false; | 1326 | return false; |
1320 | } | 1327 | } |
1321 | 1328 | ||
diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 29bacdc56f6a..5e470e233c83 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c | |||
@@ -2631,6 +2631,8 @@ struct dentry *nfs_fs_mount_common(struct nfs_server *server, | |||
2631 | /* initial superblock/root creation */ | 2631 | /* initial superblock/root creation */ |
2632 | mount_info->fill_super(s, mount_info); | 2632 | mount_info->fill_super(s, mount_info); |
2633 | nfs_get_cache_cookie(s, mount_info->parsed, mount_info->cloned); | 2633 | nfs_get_cache_cookie(s, mount_info->parsed, mount_info->cloned); |
2634 | if (!(server->flags & NFS_MOUNT_UNSHARED)) | ||
2635 | s->s_iflags |= SB_I_MULTIROOT; | ||
2634 | } | 2636 | } |
2635 | 2637 | ||
2636 | mntroot = nfs_get_root(s, mount_info->mntfh, dev_name); | 2638 | mntroot = nfs_get_root(s, mount_info->mntfh, dev_name); |
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 7428a669d7a7..e7d8ceae8f26 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
@@ -1876,40 +1876,43 @@ int nfs_generic_commit_list(struct inode *inode, struct list_head *head, | |||
1876 | return status; | 1876 | return status; |
1877 | } | 1877 | } |
1878 | 1878 | ||
1879 | int nfs_commit_inode(struct inode *inode, int how) | 1879 | static int __nfs_commit_inode(struct inode *inode, int how, |
1880 | struct writeback_control *wbc) | ||
1880 | { | 1881 | { |
1881 | LIST_HEAD(head); | 1882 | LIST_HEAD(head); |
1882 | struct nfs_commit_info cinfo; | 1883 | struct nfs_commit_info cinfo; |
1883 | int may_wait = how & FLUSH_SYNC; | 1884 | int may_wait = how & FLUSH_SYNC; |
1884 | int error = 0; | 1885 | int ret, nscan; |
1885 | int res; | ||
1886 | 1886 | ||
1887 | nfs_init_cinfo_from_inode(&cinfo, inode); | 1887 | nfs_init_cinfo_from_inode(&cinfo, inode); |
1888 | nfs_commit_begin(cinfo.mds); | 1888 | nfs_commit_begin(cinfo.mds); |
1889 | res = nfs_scan_commit(inode, &head, &cinfo); | 1889 | for (;;) { |
1890 | if (res) | 1890 | ret = nscan = nfs_scan_commit(inode, &head, &cinfo); |
1891 | error = nfs_generic_commit_list(inode, &head, how, &cinfo); | 1891 | if (ret <= 0) |
1892 | break; | ||
1893 | ret = nfs_generic_commit_list(inode, &head, how, &cinfo); | ||
1894 | if (ret < 0) | ||
1895 | break; | ||
1896 | ret = 0; | ||
1897 | if (wbc && wbc->sync_mode == WB_SYNC_NONE) { | ||
1898 | if (nscan < wbc->nr_to_write) | ||
1899 | wbc->nr_to_write -= nscan; | ||
1900 | else | ||
1901 | wbc->nr_to_write = 0; | ||
1902 | } | ||
1903 | if (nscan < INT_MAX) | ||
1904 | break; | ||
1905 | cond_resched(); | ||
1906 | } | ||
1892 | nfs_commit_end(cinfo.mds); | 1907 | nfs_commit_end(cinfo.mds); |
1893 | if (res == 0) | 1908 | if (ret || !may_wait) |
1894 | return res; | 1909 | return ret; |
1895 | if (error < 0) | 1910 | return wait_on_commit(cinfo.mds); |
1896 | goto out_error; | 1911 | } |
1897 | if (!may_wait) | 1912 | |
1898 | goto out_mark_dirty; | 1913 | int nfs_commit_inode(struct inode *inode, int how) |
1899 | error = wait_on_commit(cinfo.mds); | 1914 | { |
1900 | if (error < 0) | 1915 | return __nfs_commit_inode(inode, how, NULL); |
1901 | return error; | ||
1902 | return res; | ||
1903 | out_error: | ||
1904 | res = error; | ||
1905 | /* Note: If we exit without ensuring that the commit is complete, | ||
1906 | * we must mark the inode as dirty. Otherwise, future calls to | ||
1907 | * sync_inode() with the WB_SYNC_ALL flag set will fail to ensure | ||
1908 | * that the data is on the disk. | ||
1909 | */ | ||
1910 | out_mark_dirty: | ||
1911 | __mark_inode_dirty(inode, I_DIRTY_DATASYNC); | ||
1912 | return res; | ||
1913 | } | 1916 | } |
1914 | EXPORT_SYMBOL_GPL(nfs_commit_inode); | 1917 | EXPORT_SYMBOL_GPL(nfs_commit_inode); |
1915 | 1918 | ||
@@ -1919,11 +1922,11 @@ int nfs_write_inode(struct inode *inode, struct writeback_control *wbc) | |||
1919 | int flags = FLUSH_SYNC; | 1922 | int flags = FLUSH_SYNC; |
1920 | int ret = 0; | 1923 | int ret = 0; |
1921 | 1924 | ||
1922 | /* no commits means nothing needs to be done */ | ||
1923 | if (!atomic_long_read(&nfsi->commit_info.ncommit)) | ||
1924 | return ret; | ||
1925 | |||
1926 | if (wbc->sync_mode == WB_SYNC_NONE) { | 1925 | if (wbc->sync_mode == WB_SYNC_NONE) { |
1926 | /* no commits means nothing needs to be done */ | ||
1927 | if (!atomic_long_read(&nfsi->commit_info.ncommit)) | ||
1928 | goto check_requests_outstanding; | ||
1929 | |||
1927 | /* Don't commit yet if this is a non-blocking flush and there | 1930 | /* Don't commit yet if this is a non-blocking flush and there |
1928 | * are a lot of outstanding writes for this mapping. | 1931 | * are a lot of outstanding writes for this mapping. |
1929 | */ | 1932 | */ |
@@ -1934,16 +1937,16 @@ int nfs_write_inode(struct inode *inode, struct writeback_control *wbc) | |||
1934 | flags = 0; | 1937 | flags = 0; |
1935 | } | 1938 | } |
1936 | 1939 | ||
1937 | ret = nfs_commit_inode(inode, flags); | 1940 | ret = __nfs_commit_inode(inode, flags, wbc); |
1938 | if (ret >= 0) { | 1941 | if (!ret) { |
1939 | if (wbc->sync_mode == WB_SYNC_NONE) { | 1942 | if (flags & FLUSH_SYNC) |
1940 | if (ret < wbc->nr_to_write) | 1943 | return 0; |
1941 | wbc->nr_to_write -= ret; | 1944 | } else if (atomic_long_read(&nfsi->commit_info.ncommit)) |
1942 | else | 1945 | goto out_mark_dirty; |
1943 | wbc->nr_to_write = 0; | 1946 | |
1944 | } | 1947 | check_requests_outstanding: |
1945 | return 0; | 1948 | if (!atomic_read(&nfsi->commit_info.rpcs_out)) |
1946 | } | 1949 | return ret; |
1947 | out_mark_dirty: | 1950 | out_mark_dirty: |
1948 | __mark_inode_dirty(inode, I_DIRTY_DATASYNC); | 1951 | __mark_inode_dirty(inode, I_DIRTY_DATASYNC); |
1949 | return ret; | 1952 | return ret; |
diff --git a/fs/overlayfs/Kconfig b/fs/overlayfs/Kconfig index 406e72de88f6..ce6ff5a0a6e4 100644 --- a/fs/overlayfs/Kconfig +++ b/fs/overlayfs/Kconfig | |||
@@ -24,6 +24,8 @@ config OVERLAY_FS_REDIRECT_DIR | |||
24 | an overlay which has redirects on a kernel that doesn't support this | 24 | an overlay which has redirects on a kernel that doesn't support this |
25 | feature will have unexpected results. | 25 | feature will have unexpected results. |
26 | 26 | ||
27 | If unsure, say N. | ||
28 | |||
27 | config OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW | 29 | config OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW |
28 | bool "Overlayfs: follow redirects even if redirects are turned off" | 30 | bool "Overlayfs: follow redirects even if redirects are turned off" |
29 | default y | 31 | default y |
@@ -32,8 +34,13 @@ config OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW | |||
32 | Disable this to get a possibly more secure configuration, but that | 34 | Disable this to get a possibly more secure configuration, but that |
33 | might not be backward compatible with previous kernels. | 35 | might not be backward compatible with previous kernels. |
34 | 36 | ||
37 | If backward compatibility is not an issue, then it is safe and | ||
38 | recommended to say N here. | ||
39 | |||
35 | For more information, see Documentation/filesystems/overlayfs.txt | 40 | For more information, see Documentation/filesystems/overlayfs.txt |
36 | 41 | ||
42 | If unsure, say Y. | ||
43 | |||
37 | config OVERLAY_FS_INDEX | 44 | config OVERLAY_FS_INDEX |
38 | bool "Overlayfs: turn on inodes index feature by default" | 45 | bool "Overlayfs: turn on inodes index feature by default" |
39 | depends on OVERLAY_FS | 46 | depends on OVERLAY_FS |
@@ -51,6 +58,8 @@ config OVERLAY_FS_INDEX | |||
51 | That is, mounting an overlay which has an inodes index on a kernel | 58 | That is, mounting an overlay which has an inodes index on a kernel |
52 | that doesn't support this feature will have unexpected results. | 59 | that doesn't support this feature will have unexpected results. |
53 | 60 | ||
61 | If unsure, say N. | ||
62 | |||
54 | config OVERLAY_FS_NFS_EXPORT | 63 | config OVERLAY_FS_NFS_EXPORT |
55 | bool "Overlayfs: turn on NFS export feature by default" | 64 | bool "Overlayfs: turn on NFS export feature by default" |
56 | depends on OVERLAY_FS | 65 | depends on OVERLAY_FS |
@@ -72,3 +81,8 @@ config OVERLAY_FS_NFS_EXPORT | |||
72 | Note, that the NFS export feature is not backward compatible. | 81 | Note, that the NFS export feature is not backward compatible. |
73 | That is, mounting an overlay which has a full index on a kernel | 82 | That is, mounting an overlay which has a full index on a kernel |
74 | that doesn't support this feature will have unexpected results. | 83 | that doesn't support this feature will have unexpected results. |
84 | |||
85 | Most users should say N here and enable this feature on a case-by- | ||
86 | case basis with the "nfs_export=on" mount option. | ||
87 | |||
88 | Say N unless you fully understand the consequences. | ||
diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c index bb94ce9da5c8..87bd4148f4fb 100644 --- a/fs/overlayfs/export.c +++ b/fs/overlayfs/export.c | |||
@@ -19,6 +19,142 @@ | |||
19 | #include <linux/ratelimit.h> | 19 | #include <linux/ratelimit.h> |
20 | #include "overlayfs.h" | 20 | #include "overlayfs.h" |
21 | 21 | ||
22 | static int ovl_encode_maybe_copy_up(struct dentry *dentry) | ||
23 | { | ||
24 | int err; | ||
25 | |||
26 | if (ovl_dentry_upper(dentry)) | ||
27 | return 0; | ||
28 | |||
29 | err = ovl_want_write(dentry); | ||
30 | if (!err) { | ||
31 | err = ovl_copy_up(dentry); | ||
32 | ovl_drop_write(dentry); | ||
33 | } | ||
34 | |||
35 | if (err) { | ||
36 | pr_warn_ratelimited("overlayfs: failed to copy up on encode (%pd2, err=%i)\n", | ||
37 | dentry, err); | ||
38 | } | ||
39 | |||
40 | return err; | ||
41 | } | ||
42 | |||
43 | /* | ||
44 | * Before encoding a non-upper directory file handle from real layer N, we need | ||
45 | * to check if it will be possible to reconnect an overlay dentry from the real | ||
46 | * lower decoded dentry. This is done by following the overlay ancestry up to a | ||
47 | * "layer N connected" ancestor and verifying that all parents along the way are | ||
48 | * "layer N connectable". If an ancestor that is NOT "layer N connectable" is | ||
49 | * found, we need to copy up an ancestor, which is "layer N connectable", thus | ||
50 | * making that ancestor "layer N connected". For example: | ||
51 | * | ||
52 | * layer 1: /a | ||
53 | * layer 2: /a/b/c | ||
54 | * | ||
55 | * The overlay dentry /a is NOT "layer 2 connectable", because if dir /a is | ||
56 | * copied up and renamed, upper dir /a will be indexed by lower dir /a from | ||
57 | * layer 1. The dir /a from layer 2 will never be indexed, so the algorithm (*) | ||
58 | * in ovl_lookup_real_ancestor() will not be able to lookup a connected overlay | ||
59 | * dentry from the connected lower dentry /a/b/c. | ||
60 | * | ||
61 | * To avoid this problem on decode time, we need to copy up an ancestor of | ||
62 | * /a/b/c, which is "layer 2 connectable", on encode time. That ancestor is | ||
63 | * /a/b. After copy up (and index) of /a/b, it will become "layer 2 connected" | ||
64 | * and when the time comes to decode the file handle from lower dentry /a/b/c, | ||
65 | * ovl_lookup_real_ancestor() will find the indexed ancestor /a/b and decoding | ||
66 | * a connected overlay dentry will be accomplished. | ||
67 | * | ||
68 | * (*) the algorithm in ovl_lookup_real_ancestor() can be improved to lookup an | ||
69 | * entry /a in the lower layers above layer N and find the indexed dir /a from | ||
70 | * layer 1. If that improvement is made, then the check for "layer N connected" | ||
71 | * will need to verify there are no redirects in lower layers above N. In the | ||
72 | * example above, /a will be "layer 2 connectable". However, if layer 2 dir /a | ||
73 | * is a target of a layer 1 redirect, then /a will NOT be "layer 2 connectable": | ||
74 | * | ||
75 | * layer 1: /A (redirect = /a) | ||
76 | * layer 2: /a/b/c | ||
77 | */ | ||
78 | |||
79 | /* Return the lowest layer for encoding a connectable file handle */ | ||
80 | static int ovl_connectable_layer(struct dentry *dentry) | ||
81 | { | ||
82 | struct ovl_entry *oe = OVL_E(dentry); | ||
83 | |||
84 | /* We can get overlay root from root of any layer */ | ||
85 | if (dentry == dentry->d_sb->s_root) | ||
86 | return oe->numlower; | ||
87 | |||
88 | /* | ||
89 | * If it's an unindexed merge dir, then it's not connectable with any | ||
90 | * lower layer | ||
91 | */ | ||
92 | if (ovl_dentry_upper(dentry) && | ||
93 | !ovl_test_flag(OVL_INDEX, d_inode(dentry))) | ||
94 | return 0; | ||
95 | |||
96 | /* We can get upper/overlay path from indexed/lower dentry */ | ||
97 | return oe->lowerstack[0].layer->idx; | ||
98 | } | ||
99 | |||
100 | /* | ||
101 | * @dentry is "connected" if all ancestors up to root or a "connected" ancestor | ||
102 | * have the same uppermost lower layer as the origin's layer. We may need to | ||
103 | * copy up a "connectable" ancestor to make it "connected". A "connected" dentry | ||
104 | * cannot become non "connected", so cache positive result in dentry flags. | ||
105 | * | ||
106 | * Return the connected origin layer or < 0 on error. | ||
107 | */ | ||
108 | static int ovl_connect_layer(struct dentry *dentry) | ||
109 | { | ||
110 | struct dentry *next, *parent = NULL; | ||
111 | int origin_layer; | ||
112 | int err = 0; | ||
113 | |||
114 | if (WARN_ON(dentry == dentry->d_sb->s_root) || | ||
115 | WARN_ON(!ovl_dentry_lower(dentry))) | ||
116 | return -EIO; | ||
117 | |||
118 | origin_layer = OVL_E(dentry)->lowerstack[0].layer->idx; | ||
119 | if (ovl_dentry_test_flag(OVL_E_CONNECTED, dentry)) | ||
120 | return origin_layer; | ||
121 | |||
122 | /* Find the topmost origin layer connectable ancestor of @dentry */ | ||
123 | next = dget(dentry); | ||
124 | for (;;) { | ||
125 | parent = dget_parent(next); | ||
126 | if (WARN_ON(parent == next)) { | ||
127 | err = -EIO; | ||
128 | break; | ||
129 | } | ||
130 | |||
131 | /* | ||
132 | * If @parent is not origin layer connectable, then copy up | ||
133 | * @next which is origin layer connectable and we are done. | ||
134 | */ | ||
135 | if (ovl_connectable_layer(parent) < origin_layer) { | ||
136 | err = ovl_encode_maybe_copy_up(next); | ||
137 | break; | ||
138 | } | ||
139 | |||
140 | /* If @parent is connected or indexed we are done */ | ||
141 | if (ovl_dentry_test_flag(OVL_E_CONNECTED, parent) || | ||
142 | ovl_test_flag(OVL_INDEX, d_inode(parent))) | ||
143 | break; | ||
144 | |||
145 | dput(next); | ||
146 | next = parent; | ||
147 | } | ||
148 | |||
149 | dput(parent); | ||
150 | dput(next); | ||
151 | |||
152 | if (!err) | ||
153 | ovl_dentry_set_flag(OVL_E_CONNECTED, dentry); | ||
154 | |||
155 | return err ?: origin_layer; | ||
156 | } | ||
157 | |||
22 | /* | 158 | /* |
23 | * We only need to encode origin if there is a chance that the same object was | 159 | * We only need to encode origin if there is a chance that the same object was |
24 | * encoded pre copy up and then we need to stay consistent with the same | 160 | * encoded pre copy up and then we need to stay consistent with the same |
@@ -41,73 +177,59 @@ | |||
41 | * L = lower file handle | 177 | * L = lower file handle |
42 | * | 178 | * |
43 | * (*) Connecting an overlay dir from real lower dentry is not always | 179 | * (*) Connecting an overlay dir from real lower dentry is not always |
44 | * possible when there are redirects in lower layers. To mitigate this case, | 180 | * possible when there are redirects in lower layers and non-indexed merge dirs. |
45 | * we copy up the lower dir first and then encode an upper dir file handle. | 181 | * To mitigate those case, we may copy up the lower dir ancestor before encode |
182 | * a lower dir file handle. | ||
183 | * | ||
184 | * Return 0 for upper file handle, > 0 for lower file handle or < 0 on error. | ||
46 | */ | 185 | */ |
47 | static bool ovl_should_encode_origin(struct dentry *dentry) | 186 | static int ovl_check_encode_origin(struct dentry *dentry) |
48 | { | 187 | { |
49 | struct ovl_fs *ofs = dentry->d_sb->s_fs_info; | 188 | struct ovl_fs *ofs = dentry->d_sb->s_fs_info; |
50 | 189 | ||
190 | /* Upper file handle for pure upper */ | ||
51 | if (!ovl_dentry_lower(dentry)) | 191 | if (!ovl_dentry_lower(dentry)) |
52 | return false; | 192 | return 0; |
53 | 193 | ||
54 | /* | 194 | /* |
55 | * Decoding a merge dir, whose origin's parent is under a redirected | 195 | * Upper file handle for non-indexed upper. |
56 | * lower dir is not always possible. As a simple aproximation, we do | ||
57 | * not encode lower dir file handles when overlay has multiple lower | ||
58 | * layers and origin is below the topmost lower layer. | ||
59 | * | 196 | * |
60 | * TODO: copy up only the parent that is under redirected lower. | 197 | * Root is never indexed, so if there's an upper layer, encode upper for |
198 | * root. | ||
61 | */ | 199 | */ |
62 | if (d_is_dir(dentry) && ofs->upper_mnt && | ||
63 | OVL_E(dentry)->lowerstack[0].layer->idx > 1) | ||
64 | return false; | ||
65 | |||
66 | /* Decoding a non-indexed upper from origin is not implemented */ | ||
67 | if (ovl_dentry_upper(dentry) && | 200 | if (ovl_dentry_upper(dentry) && |
68 | !ovl_test_flag(OVL_INDEX, d_inode(dentry))) | 201 | !ovl_test_flag(OVL_INDEX, d_inode(dentry))) |
69 | return false; | ||
70 | |||
71 | return true; | ||
72 | } | ||
73 | |||
74 | static int ovl_encode_maybe_copy_up(struct dentry *dentry) | ||
75 | { | ||
76 | int err; | ||
77 | |||
78 | if (ovl_dentry_upper(dentry)) | ||
79 | return 0; | 202 | return 0; |
80 | 203 | ||
81 | err = ovl_want_write(dentry); | 204 | /* |
82 | if (err) | 205 | * Decoding a merge dir, whose origin's ancestor is under a redirected |
83 | return err; | 206 | * lower dir or under a non-indexed upper is not always possible. |
84 | 207 | * ovl_connect_layer() will try to make origin's layer "connected" by | |
85 | err = ovl_copy_up(dentry); | 208 | * copying up a "connectable" ancestor. |
209 | */ | ||
210 | if (d_is_dir(dentry) && ofs->upper_mnt) | ||
211 | return ovl_connect_layer(dentry); | ||
86 | 212 | ||
87 | ovl_drop_write(dentry); | 213 | /* Lower file handle for indexed and non-upper dir/non-dir */ |
88 | return err; | 214 | return 1; |
89 | } | 215 | } |
90 | 216 | ||
91 | static int ovl_d_to_fh(struct dentry *dentry, char *buf, int buflen) | 217 | static int ovl_d_to_fh(struct dentry *dentry, char *buf, int buflen) |
92 | { | 218 | { |
93 | struct dentry *origin = ovl_dentry_lower(dentry); | ||
94 | struct ovl_fh *fh = NULL; | 219 | struct ovl_fh *fh = NULL; |
95 | int err; | 220 | int err, enc_lower; |
96 | 221 | ||
97 | /* | 222 | /* |
98 | * If we should not encode a lower dir file handle, copy up and encode | 223 | * Check if we should encode a lower or upper file handle and maybe |
99 | * an upper dir file handle. | 224 | * copy up an ancestor to make lower file handle connectable. |
100 | */ | 225 | */ |
101 | if (!ovl_should_encode_origin(dentry)) { | 226 | err = enc_lower = ovl_check_encode_origin(dentry); |
102 | err = ovl_encode_maybe_copy_up(dentry); | 227 | if (enc_lower < 0) |
103 | if (err) | 228 | goto fail; |
104 | goto fail; | ||
105 | |||
106 | origin = NULL; | ||
107 | } | ||
108 | 229 | ||
109 | /* Encode an upper or origin file handle */ | 230 | /* Encode an upper or lower file handle */ |
110 | fh = ovl_encode_fh(origin ?: ovl_dentry_upper(dentry), !origin); | 231 | fh = ovl_encode_fh(enc_lower ? ovl_dentry_lower(dentry) : |
232 | ovl_dentry_upper(dentry), !enc_lower); | ||
111 | err = PTR_ERR(fh); | 233 | err = PTR_ERR(fh); |
112 | if (IS_ERR(fh)) | 234 | if (IS_ERR(fh)) |
113 | goto fail; | 235 | goto fail; |
@@ -355,8 +477,8 @@ static struct dentry *ovl_lookup_real_inode(struct super_block *sb, | |||
355 | dput(upper); | 477 | dput(upper); |
356 | } | 478 | } |
357 | 479 | ||
358 | if (!this) | 480 | if (IS_ERR_OR_NULL(this)) |
359 | return NULL; | 481 | return this; |
360 | 482 | ||
361 | if (WARN_ON(ovl_dentry_real_at(this, layer->idx) != real)) { | 483 | if (WARN_ON(ovl_dentry_real_at(this, layer->idx) != real)) { |
362 | dput(this); | 484 | dput(this); |
@@ -498,7 +620,7 @@ static struct dentry *ovl_lookup_real(struct super_block *sb, | |||
498 | if (err == -ECHILD) { | 620 | if (err == -ECHILD) { |
499 | this = ovl_lookup_real_ancestor(sb, real, | 621 | this = ovl_lookup_real_ancestor(sb, real, |
500 | layer); | 622 | layer); |
501 | err = IS_ERR(this) ? PTR_ERR(this) : 0; | 623 | err = PTR_ERR_OR_ZERO(this); |
502 | } | 624 | } |
503 | if (!err) { | 625 | if (!err) { |
504 | dput(connected); | 626 | dput(connected); |
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index fcd97b783fa1..3b1bd469accd 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c | |||
@@ -669,38 +669,59 @@ struct inode *ovl_lookup_inode(struct super_block *sb, struct dentry *real, | |||
669 | return inode; | 669 | return inode; |
670 | } | 670 | } |
671 | 671 | ||
672 | /* | ||
673 | * Does overlay inode need to be hashed by lower inode? | ||
674 | */ | ||
675 | static bool ovl_hash_bylower(struct super_block *sb, struct dentry *upper, | ||
676 | struct dentry *lower, struct dentry *index) | ||
677 | { | ||
678 | struct ovl_fs *ofs = sb->s_fs_info; | ||
679 | |||
680 | /* No, if pure upper */ | ||
681 | if (!lower) | ||
682 | return false; | ||
683 | |||
684 | /* Yes, if already indexed */ | ||
685 | if (index) | ||
686 | return true; | ||
687 | |||
688 | /* Yes, if won't be copied up */ | ||
689 | if (!ofs->upper_mnt) | ||
690 | return true; | ||
691 | |||
692 | /* No, if lower hardlink is or will be broken on copy up */ | ||
693 | if ((upper || !ovl_indexdir(sb)) && | ||
694 | !d_is_dir(lower) && d_inode(lower)->i_nlink > 1) | ||
695 | return false; | ||
696 | |||
697 | /* No, if non-indexed upper with NFS export */ | ||
698 | if (sb->s_export_op && upper) | ||
699 | return false; | ||
700 | |||
701 | /* Otherwise, hash by lower inode for fsnotify */ | ||
702 | return true; | ||
703 | } | ||
704 | |||
672 | struct inode *ovl_get_inode(struct super_block *sb, struct dentry *upperdentry, | 705 | struct inode *ovl_get_inode(struct super_block *sb, struct dentry *upperdentry, |
673 | struct dentry *lowerdentry, struct dentry *index, | 706 | struct dentry *lowerdentry, struct dentry *index, |
674 | unsigned int numlower) | 707 | unsigned int numlower) |
675 | { | 708 | { |
676 | struct ovl_fs *ofs = sb->s_fs_info; | ||
677 | struct inode *realinode = upperdentry ? d_inode(upperdentry) : NULL; | 709 | struct inode *realinode = upperdentry ? d_inode(upperdentry) : NULL; |
678 | struct inode *inode; | 710 | struct inode *inode; |
679 | /* Already indexed or could be indexed on copy up? */ | 711 | bool bylower = ovl_hash_bylower(sb, upperdentry, lowerdentry, index); |
680 | bool indexed = (index || (ovl_indexdir(sb) && !upperdentry)); | ||
681 | struct dentry *origin = indexed ? lowerdentry : NULL; | ||
682 | bool is_dir; | 712 | bool is_dir; |
683 | 713 | ||
684 | if (WARN_ON(upperdentry && indexed && !lowerdentry)) | ||
685 | return ERR_PTR(-EIO); | ||
686 | |||
687 | if (!realinode) | 714 | if (!realinode) |
688 | realinode = d_inode(lowerdentry); | 715 | realinode = d_inode(lowerdentry); |
689 | 716 | ||
690 | /* | 717 | /* |
691 | * Copy up origin (lower) may exist for non-indexed non-dir upper, but | 718 | * Copy up origin (lower) may exist for non-indexed upper, but we must |
692 | * we must not use lower as hash key in that case. | 719 | * not use lower as hash key if this is a broken hardlink. |
693 | * Hash non-dir that is or could be indexed by origin inode. | ||
694 | * Hash dir that is or could be merged by origin inode. | ||
695 | * Hash pure upper and non-indexed non-dir by upper inode. | ||
696 | * Hash non-indexed dir by upper inode for NFS export. | ||
697 | */ | 720 | */ |
698 | is_dir = S_ISDIR(realinode->i_mode); | 721 | is_dir = S_ISDIR(realinode->i_mode); |
699 | if (is_dir && (indexed || !sb->s_export_op || !ofs->upper_mnt)) | 722 | if (upperdentry || bylower) { |
700 | origin = lowerdentry; | 723 | struct inode *key = d_inode(bylower ? lowerdentry : |
701 | 724 | upperdentry); | |
702 | if (upperdentry || origin) { | ||
703 | struct inode *key = d_inode(origin ?: upperdentry); | ||
704 | unsigned int nlink = is_dir ? 1 : realinode->i_nlink; | 725 | unsigned int nlink = is_dir ? 1 : realinode->i_nlink; |
705 | 726 | ||
706 | inode = iget5_locked(sb, (unsigned long) key, | 727 | inode = iget5_locked(sb, (unsigned long) key, |
@@ -728,6 +749,7 @@ struct inode *ovl_get_inode(struct super_block *sb, struct dentry *upperdentry, | |||
728 | nlink = ovl_get_nlink(lowerdentry, upperdentry, nlink); | 749 | nlink = ovl_get_nlink(lowerdentry, upperdentry, nlink); |
729 | set_nlink(inode, nlink); | 750 | set_nlink(inode, nlink); |
730 | } else { | 751 | } else { |
752 | /* Lower hardlink that will be broken on copy up */ | ||
731 | inode = new_inode(sb); | 753 | inode = new_inode(sb); |
732 | if (!inode) | 754 | if (!inode) |
733 | goto out_nomem; | 755 | goto out_nomem; |
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c index de3e6da1d5a5..70fcfcc684cc 100644 --- a/fs/overlayfs/namei.c +++ b/fs/overlayfs/namei.c | |||
@@ -913,9 +913,6 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, | |||
913 | stack[ctr].layer = lower.layer; | 913 | stack[ctr].layer = lower.layer; |
914 | ctr++; | 914 | ctr++; |
915 | 915 | ||
916 | if (d.stop) | ||
917 | break; | ||
918 | |||
919 | /* | 916 | /* |
920 | * Following redirects can have security consequences: it's like | 917 | * Following redirects can have security consequences: it's like |
921 | * a symlink into the lower layer without the permission checks. | 918 | * a symlink into the lower layer without the permission checks. |
@@ -933,6 +930,9 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, | |||
933 | goto out_put; | 930 | goto out_put; |
934 | } | 931 | } |
935 | 932 | ||
933 | if (d.stop) | ||
934 | break; | ||
935 | |||
936 | if (d.redirect && d.redirect[0] == '/' && poe != roe) { | 936 | if (d.redirect && d.redirect[0] == '/' && poe != roe) { |
937 | poe = roe; | 937 | poe = roe; |
938 | /* Find the current layer on the root dentry */ | 938 | /* Find the current layer on the root dentry */ |
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h index 0df25a9c94bd..225ff1171147 100644 --- a/fs/overlayfs/overlayfs.h +++ b/fs/overlayfs/overlayfs.h | |||
@@ -40,6 +40,7 @@ enum ovl_inode_flag { | |||
40 | enum ovl_entry_flag { | 40 | enum ovl_entry_flag { |
41 | OVL_E_UPPER_ALIAS, | 41 | OVL_E_UPPER_ALIAS, |
42 | OVL_E_OPAQUE, | 42 | OVL_E_OPAQUE, |
43 | OVL_E_CONNECTED, | ||
43 | }; | 44 | }; |
44 | 45 | ||
45 | /* | 46 | /* |
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 9ee37c76091d..7c24619ae7fc 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c | |||
@@ -1359,6 +1359,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) | |||
1359 | 1359 | ||
1360 | /* Root is always merge -> can have whiteouts */ | 1360 | /* Root is always merge -> can have whiteouts */ |
1361 | ovl_set_flag(OVL_WHITEOUTS, d_inode(root_dentry)); | 1361 | ovl_set_flag(OVL_WHITEOUTS, d_inode(root_dentry)); |
1362 | ovl_dentry_set_flag(OVL_E_CONNECTED, root_dentry); | ||
1362 | ovl_inode_init(d_inode(root_dentry), upperpath.dentry, | 1363 | ovl_inode_init(d_inode(root_dentry), upperpath.dentry, |
1363 | ovl_dentry_lower(root_dentry)); | 1364 | ovl_dentry_lower(root_dentry)); |
1364 | 1365 | ||
diff --git a/fs/xfs/scrub/agheader.c b/fs/xfs/scrub/agheader.c index fd975524f460..05c66e05ae20 100644 --- a/fs/xfs/scrub/agheader.c +++ b/fs/xfs/scrub/agheader.c | |||
@@ -767,7 +767,7 @@ int | |||
767 | xfs_scrub_agfl( | 767 | xfs_scrub_agfl( |
768 | struct xfs_scrub_context *sc) | 768 | struct xfs_scrub_context *sc) |
769 | { | 769 | { |
770 | struct xfs_scrub_agfl_info sai = { 0 }; | 770 | struct xfs_scrub_agfl_info sai; |
771 | struct xfs_agf *agf; | 771 | struct xfs_agf *agf; |
772 | xfs_agnumber_t agno; | 772 | xfs_agnumber_t agno; |
773 | unsigned int agflcount; | 773 | unsigned int agflcount; |
@@ -795,6 +795,7 @@ xfs_scrub_agfl( | |||
795 | xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp); | 795 | xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp); |
796 | goto out; | 796 | goto out; |
797 | } | 797 | } |
798 | memset(&sai, 0, sizeof(sai)); | ||
798 | sai.sz_entries = agflcount; | 799 | sai.sz_entries = agflcount; |
799 | sai.entries = kmem_zalloc(sizeof(xfs_agblock_t) * agflcount, KM_NOFS); | 800 | sai.entries = kmem_zalloc(sizeof(xfs_agblock_t) * agflcount, KM_NOFS); |
800 | if (!sai.entries) { | 801 | if (!sai.entries) { |
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 66e1edbfb2b2..046469fcc1b8 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c | |||
@@ -955,15 +955,29 @@ static inline bool imap_needs_alloc(struct inode *inode, | |||
955 | (IS_DAX(inode) && imap->br_state == XFS_EXT_UNWRITTEN); | 955 | (IS_DAX(inode) && imap->br_state == XFS_EXT_UNWRITTEN); |
956 | } | 956 | } |
957 | 957 | ||
958 | static inline bool needs_cow_for_zeroing(struct xfs_bmbt_irec *imap, int nimaps) | ||
959 | { | ||
960 | return nimaps && | ||
961 | imap->br_startblock != HOLESTARTBLOCK && | ||
962 | imap->br_state != XFS_EXT_UNWRITTEN; | ||
963 | } | ||
964 | |||
958 | static inline bool need_excl_ilock(struct xfs_inode *ip, unsigned flags) | 965 | static inline bool need_excl_ilock(struct xfs_inode *ip, unsigned flags) |
959 | { | 966 | { |
960 | /* | 967 | /* |
961 | * COW writes will allocate delalloc space, so we need to make sure | 968 | * COW writes may allocate delalloc space or convert unwritten COW |
962 | * to take the lock exclusively here. | 969 | * extents, so we need to make sure to take the lock exclusively here. |
963 | */ | 970 | */ |
964 | if (xfs_is_reflink_inode(ip) && (flags & (IOMAP_WRITE | IOMAP_ZERO))) | 971 | if (xfs_is_reflink_inode(ip) && (flags & (IOMAP_WRITE | IOMAP_ZERO))) |
965 | return true; | 972 | return true; |
966 | if ((flags & IOMAP_DIRECT) && (flags & IOMAP_WRITE)) | 973 | |
974 | /* | ||
975 | * Extents not yet cached requires exclusive access, don't block. | ||
976 | * This is an opencoded xfs_ilock_data_map_shared() to cater for the | ||
977 | * non-blocking behaviour. | ||
978 | */ | ||
979 | if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE && | ||
980 | !(ip->i_df.if_flags & XFS_IFEXTENTS)) | ||
967 | return true; | 981 | return true; |
968 | return false; | 982 | return false; |
969 | } | 983 | } |
@@ -993,16 +1007,18 @@ xfs_file_iomap_begin( | |||
993 | return xfs_file_iomap_begin_delay(inode, offset, length, iomap); | 1007 | return xfs_file_iomap_begin_delay(inode, offset, length, iomap); |
994 | } | 1008 | } |
995 | 1009 | ||
996 | if (need_excl_ilock(ip, flags)) { | 1010 | if (need_excl_ilock(ip, flags)) |
997 | lockmode = XFS_ILOCK_EXCL; | 1011 | lockmode = XFS_ILOCK_EXCL; |
998 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 1012 | else |
999 | } else { | 1013 | lockmode = XFS_ILOCK_SHARED; |
1000 | lockmode = xfs_ilock_data_map_shared(ip); | ||
1001 | } | ||
1002 | 1014 | ||
1003 | if ((flags & IOMAP_NOWAIT) && !(ip->i_df.if_flags & XFS_IFEXTENTS)) { | 1015 | if (flags & IOMAP_NOWAIT) { |
1004 | error = -EAGAIN; | 1016 | if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) |
1005 | goto out_unlock; | 1017 | return -EAGAIN; |
1018 | if (!xfs_ilock_nowait(ip, lockmode)) | ||
1019 | return -EAGAIN; | ||
1020 | } else { | ||
1021 | xfs_ilock(ip, lockmode); | ||
1006 | } | 1022 | } |
1007 | 1023 | ||
1008 | ASSERT(offset <= mp->m_super->s_maxbytes); | 1024 | ASSERT(offset <= mp->m_super->s_maxbytes); |
@@ -1024,7 +1040,9 @@ xfs_file_iomap_begin( | |||
1024 | goto out_unlock; | 1040 | goto out_unlock; |
1025 | } | 1041 | } |
1026 | 1042 | ||
1027 | if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && xfs_is_reflink_inode(ip)) { | 1043 | if (xfs_is_reflink_inode(ip) && |
1044 | ((flags & IOMAP_WRITE) || | ||
1045 | ((flags & IOMAP_ZERO) && needs_cow_for_zeroing(&imap, nimaps)))) { | ||
1028 | if (flags & IOMAP_DIRECT) { | 1046 | if (flags & IOMAP_DIRECT) { |
1029 | /* | 1047 | /* |
1030 | * A reflinked inode will result in CoW alloc. | 1048 | * A reflinked inode will result in CoW alloc. |
diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c index 3a55d6fc271b..7a39f40645f7 100644 --- a/fs/xfs/xfs_refcount_item.c +++ b/fs/xfs/xfs_refcount_item.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include "xfs_log_format.h" | 23 | #include "xfs_log_format.h" |
24 | #include "xfs_trans_resv.h" | 24 | #include "xfs_trans_resv.h" |
25 | #include "xfs_bit.h" | 25 | #include "xfs_bit.h" |
26 | #include "xfs_shared.h" | ||
26 | #include "xfs_mount.h" | 27 | #include "xfs_mount.h" |
27 | #include "xfs_defer.h" | 28 | #include "xfs_defer.h" |
28 | #include "xfs_trans.h" | 29 | #include "xfs_trans.h" |
@@ -456,10 +457,12 @@ xfs_cui_recover( | |||
456 | * transaction. Normally, any work that needs to be deferred | 457 | * transaction. Normally, any work that needs to be deferred |
457 | * gets attached to the same defer_ops that scheduled the | 458 | * gets attached to the same defer_ops that scheduled the |
458 | * refcount update. However, we're in log recovery here, so we | 459 | * refcount update. However, we're in log recovery here, so we |
459 | * we create our own defer_ops and use that to finish up any | 460 | * we use the passed in defer_ops and to finish up any work that |
460 | * work that doesn't fit. | 461 | * doesn't fit. We need to reserve enough blocks to handle a |
462 | * full btree split on either end of the refcount range. | ||
461 | */ | 463 | */ |
462 | error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp); | 464 | error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, |
465 | mp->m_refc_maxlevels * 2, 0, XFS_TRANS_RESERVE, &tp); | ||
463 | if (error) | 466 | if (error) |
464 | return error; | 467 | return error; |
465 | cudp = xfs_trans_get_cud(tp, cuip); | 468 | cudp = xfs_trans_get_cud(tp, cuip); |
diff --git a/fs/xfs/xfs_rmap_item.c b/fs/xfs/xfs_rmap_item.c index f3b139c9aa16..49d3124863a8 100644 --- a/fs/xfs/xfs_rmap_item.c +++ b/fs/xfs/xfs_rmap_item.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include "xfs_log_format.h" | 23 | #include "xfs_log_format.h" |
24 | #include "xfs_trans_resv.h" | 24 | #include "xfs_trans_resv.h" |
25 | #include "xfs_bit.h" | 25 | #include "xfs_bit.h" |
26 | #include "xfs_shared.h" | ||
26 | #include "xfs_mount.h" | 27 | #include "xfs_mount.h" |
27 | #include "xfs_defer.h" | 28 | #include "xfs_defer.h" |
28 | #include "xfs_trans.h" | 29 | #include "xfs_trans.h" |
@@ -470,7 +471,8 @@ xfs_rui_recover( | |||
470 | } | 471 | } |
471 | } | 472 | } |
472 | 473 | ||
473 | error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp); | 474 | error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, |
475 | mp->m_rmap_maxlevels, 0, XFS_TRANS_RESERVE, &tp); | ||
474 | if (error) | 476 | if (error) |
475 | return error; | 477 | return error; |
476 | rudp = xfs_trans_get_rud(tp, ruip); | 478 | rudp = xfs_trans_get_rud(tp, ruip); |
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 7aba628dc527..93588ea3d3d2 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c | |||
@@ -250,6 +250,7 @@ xfs_parseargs( | |||
250 | return -EINVAL; | 250 | return -EINVAL; |
251 | break; | 251 | break; |
252 | case Opt_logdev: | 252 | case Opt_logdev: |
253 | kfree(mp->m_logname); | ||
253 | mp->m_logname = match_strdup(args); | 254 | mp->m_logname = match_strdup(args); |
254 | if (!mp->m_logname) | 255 | if (!mp->m_logname) |
255 | return -ENOMEM; | 256 | return -ENOMEM; |
@@ -258,6 +259,7 @@ xfs_parseargs( | |||
258 | xfs_warn(mp, "%s option not allowed on this system", p); | 259 | xfs_warn(mp, "%s option not allowed on this system", p); |
259 | return -EINVAL; | 260 | return -EINVAL; |
260 | case Opt_rtdev: | 261 | case Opt_rtdev: |
262 | kfree(mp->m_rtname); | ||
261 | mp->m_rtname = match_strdup(args); | 263 | mp->m_rtname = match_strdup(args); |
262 | if (!mp->m_rtname) | 264 | if (!mp->m_rtname) |
263 | return -ENOMEM; | 265 | return -ENOMEM; |
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index d32b688eb346..d23dcdd1bd95 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h | |||
@@ -56,6 +56,7 @@ struct drm_printer; | |||
56 | #define DRIVER_ATOMIC 0x10000 | 56 | #define DRIVER_ATOMIC 0x10000 |
57 | #define DRIVER_KMS_LEGACY_CONTEXT 0x20000 | 57 | #define DRIVER_KMS_LEGACY_CONTEXT 0x20000 |
58 | #define DRIVER_SYNCOBJ 0x40000 | 58 | #define DRIVER_SYNCOBJ 0x40000 |
59 | #define DRIVER_PREFER_XBGR_30BPP 0x80000 | ||
59 | 60 | ||
60 | /** | 61 | /** |
61 | * struct drm_driver - DRM driver structure | 62 | * struct drm_driver - DRM driver structure |
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index cdbd142ca7f2..02924ae2527e 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h | |||
@@ -360,6 +360,7 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu); | |||
360 | bool kvm_vcpu_has_pending_irqs(struct kvm_vcpu *vcpu); | 360 | bool kvm_vcpu_has_pending_irqs(struct kvm_vcpu *vcpu); |
361 | void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu); | 361 | void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu); |
362 | void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu); | 362 | void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu); |
363 | void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid); | ||
363 | 364 | ||
364 | void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg); | 365 | void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg); |
365 | 366 | ||
diff --git a/include/linux/bio.h b/include/linux/bio.h index d0eb659fa733..ce547a25e8ae 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h | |||
@@ -511,6 +511,7 @@ void zero_fill_bio(struct bio *bio); | |||
511 | extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *); | 511 | extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *); |
512 | extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int); | 512 | extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int); |
513 | extern unsigned int bvec_nr_vecs(unsigned short idx); | 513 | extern unsigned int bvec_nr_vecs(unsigned short idx); |
514 | extern const char *bio_devname(struct bio *bio, char *buffer); | ||
514 | 515 | ||
515 | #define bio_set_dev(bio, bdev) \ | 516 | #define bio_set_dev(bio, bdev) \ |
516 | do { \ | 517 | do { \ |
@@ -529,9 +530,6 @@ do { \ | |||
529 | #define bio_dev(bio) \ | 530 | #define bio_dev(bio) \ |
530 | disk_devt((bio)->bi_disk) | 531 | disk_devt((bio)->bi_disk) |
531 | 532 | ||
532 | #define bio_devname(bio, buf) \ | ||
533 | __bdevname(bio_dev(bio), (buf)) | ||
534 | |||
535 | #ifdef CONFIG_BLK_CGROUP | 533 | #ifdef CONFIG_BLK_CGROUP |
536 | int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css); | 534 | int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css); |
537 | void bio_disassociate_task(struct bio *bio); | 535 | void bio_disassociate_task(struct bio *bio); |
diff --git a/include/linux/compat.h b/include/linux/compat.h index 8a9643857c4a..16c3027074a2 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/if.h> | 17 | #include <linux/if.h> |
18 | #include <linux/fs.h> | 18 | #include <linux/fs.h> |
19 | #include <linux/aio_abi.h> /* for aio_context_t */ | 19 | #include <linux/aio_abi.h> /* for aio_context_t */ |
20 | #include <linux/uaccess.h> | ||
20 | #include <linux/unistd.h> | 21 | #include <linux/unistd.h> |
21 | 22 | ||
22 | #include <asm/compat.h> | 23 | #include <asm/compat.h> |
@@ -229,13 +230,13 @@ typedef struct compat_siginfo { | |||
229 | short int _addr_lsb; /* Valid LSB of the reported address. */ | 230 | short int _addr_lsb; /* Valid LSB of the reported address. */ |
230 | /* used when si_code=SEGV_BNDERR */ | 231 | /* used when si_code=SEGV_BNDERR */ |
231 | struct { | 232 | struct { |
232 | short _dummy_bnd; | 233 | compat_uptr_t _dummy_bnd; |
233 | compat_uptr_t _lower; | 234 | compat_uptr_t _lower; |
234 | compat_uptr_t _upper; | 235 | compat_uptr_t _upper; |
235 | } _addr_bnd; | 236 | } _addr_bnd; |
236 | /* used when si_code=SEGV_PKUERR */ | 237 | /* used when si_code=SEGV_PKUERR */ |
237 | struct { | 238 | struct { |
238 | short _dummy_pkey; | 239 | compat_uptr_t _dummy_pkey; |
239 | u32 _pkey; | 240 | u32 _pkey; |
240 | } _addr_pkey; | 241 | } _addr_pkey; |
241 | }; | 242 | }; |
@@ -550,8 +551,29 @@ asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv, | |||
550 | asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp); | 551 | asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp); |
551 | 552 | ||
552 | extern int get_compat_sigset(sigset_t *set, const compat_sigset_t __user *compat); | 553 | extern int get_compat_sigset(sigset_t *set, const compat_sigset_t __user *compat); |
553 | extern int put_compat_sigset(compat_sigset_t __user *compat, | 554 | |
554 | const sigset_t *set, unsigned int size); | 555 | /* |
556 | * Defined inline such that size can be compile time constant, which avoids | ||
557 | * CONFIG_HARDENED_USERCOPY complaining about copies from task_struct | ||
558 | */ | ||
559 | static inline int | ||
560 | put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set, | ||
561 | unsigned int size) | ||
562 | { | ||
563 | /* size <= sizeof(compat_sigset_t) <= sizeof(sigset_t) */ | ||
564 | #ifdef __BIG_ENDIAN | ||
565 | compat_sigset_t v; | ||
566 | switch (_NSIG_WORDS) { | ||
567 | case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3]; | ||
568 | case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2]; | ||
569 | case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1]; | ||
570 | case 1: v.sig[1] = (set->sig[0] >> 32); v.sig[0] = set->sig[0]; | ||
571 | } | ||
572 | return copy_to_user(compat, &v, size) ? -EFAULT : 0; | ||
573 | #else | ||
574 | return copy_to_user(compat, set, size) ? -EFAULT : 0; | ||
575 | #endif | ||
576 | } | ||
555 | 577 | ||
556 | asmlinkage long compat_sys_migrate_pages(compat_pid_t pid, | 578 | asmlinkage long compat_sys_migrate_pages(compat_pid_t pid, |
557 | compat_ulong_t maxnode, const compat_ulong_t __user *old_nodes, | 579 | compat_ulong_t maxnode, const compat_ulong_t __user *old_nodes, |
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index d02a4df3f473..d3f264a5b04d 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h | |||
@@ -27,3 +27,8 @@ | |||
27 | #if __has_feature(address_sanitizer) | 27 | #if __has_feature(address_sanitizer) |
28 | #define __SANITIZE_ADDRESS__ | 28 | #define __SANITIZE_ADDRESS__ |
29 | #endif | 29 | #endif |
30 | |||
31 | /* Clang doesn't have a way to turn it off per-function, yet. */ | ||
32 | #ifdef __noretpoline | ||
33 | #undef __noretpoline | ||
34 | #endif | ||
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 901c1ccb3374..e2c7f4369eff 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h | |||
@@ -93,6 +93,10 @@ | |||
93 | #define __weak __attribute__((weak)) | 93 | #define __weak __attribute__((weak)) |
94 | #define __alias(symbol) __attribute__((alias(#symbol))) | 94 | #define __alias(symbol) __attribute__((alias(#symbol))) |
95 | 95 | ||
96 | #ifdef RETPOLINE | ||
97 | #define __noretpoline __attribute__((indirect_branch("keep"))) | ||
98 | #endif | ||
99 | |||
96 | /* | 100 | /* |
97 | * it doesn't make sense on ARM (currently the only user of __naked) | 101 | * it doesn't make sense on ARM (currently the only user of __naked) |
98 | * to trace naked functions because then mcount is called without | 102 | * to trace naked functions because then mcount is called without |
diff --git a/include/linux/fs.h b/include/linux/fs.h index 2a815560fda0..c6baf767619e 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -1317,6 +1317,7 @@ extern int send_sigurg(struct fown_struct *fown); | |||
1317 | #define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */ | 1317 | #define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */ |
1318 | #define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */ | 1318 | #define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */ |
1319 | #define SB_I_NODEV 0x00000004 /* Ignore devices on this fs */ | 1319 | #define SB_I_NODEV 0x00000004 /* Ignore devices on this fs */ |
1320 | #define SB_I_MULTIROOT 0x00000008 /* Multiple roots to the dentry tree */ | ||
1320 | 1321 | ||
1321 | /* sb->s_iflags to limit user namespace mounts */ | 1322 | /* sb->s_iflags to limit user namespace mounts */ |
1322 | #define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */ | 1323 | #define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */ |
@@ -3198,7 +3199,7 @@ static inline bool vma_is_fsdax(struct vm_area_struct *vma) | |||
3198 | if (!vma_is_dax(vma)) | 3199 | if (!vma_is_dax(vma)) |
3199 | return false; | 3200 | return false; |
3200 | inode = file_inode(vma->vm_file); | 3201 | inode = file_inode(vma->vm_file); |
3201 | if (inode->i_mode == S_IFCHR) | 3202 | if (S_ISCHR(inode->i_mode)) |
3202 | return false; /* device-dax */ | 3203 | return false; /* device-dax */ |
3203 | return true; | 3204 | return true; |
3204 | } | 3205 | } |
diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 5e3531027b51..c826b0b5232a 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h | |||
@@ -198,6 +198,7 @@ struct gendisk { | |||
198 | void *private_data; | 198 | void *private_data; |
199 | 199 | ||
200 | int flags; | 200 | int flags; |
201 | struct rw_semaphore lookup_sem; | ||
201 | struct kobject *slave_dir; | 202 | struct kobject *slave_dir; |
202 | 203 | ||
203 | struct timer_rand_state *random; | 204 | struct timer_rand_state *random; |
@@ -600,8 +601,9 @@ extern void delete_partition(struct gendisk *, int); | |||
600 | extern void printk_all_partitions(void); | 601 | extern void printk_all_partitions(void); |
601 | 602 | ||
602 | extern struct gendisk *__alloc_disk_node(int minors, int node_id); | 603 | extern struct gendisk *__alloc_disk_node(int minors, int node_id); |
603 | extern struct kobject *get_disk(struct gendisk *disk); | 604 | extern struct kobject *get_disk_and_module(struct gendisk *disk); |
604 | extern void put_disk(struct gendisk *disk); | 605 | extern void put_disk(struct gendisk *disk); |
606 | extern void put_disk_and_module(struct gendisk *disk); | ||
605 | extern void blk_register_region(dev_t devt, unsigned long range, | 607 | extern void blk_register_region(dev_t devt, unsigned long range, |
606 | struct module *module, | 608 | struct module *module, |
607 | struct kobject *(*probe)(dev_t, int *, void *), | 609 | struct kobject *(*probe)(dev_t, int *, void *), |
diff --git a/include/linux/init.h b/include/linux/init.h index 506a98151131..bc27cf03c41e 100644 --- a/include/linux/init.h +++ b/include/linux/init.h | |||
@@ -6,10 +6,10 @@ | |||
6 | #include <linux/types.h> | 6 | #include <linux/types.h> |
7 | 7 | ||
8 | /* Built-in __init functions needn't be compiled with retpoline */ | 8 | /* Built-in __init functions needn't be compiled with retpoline */ |
9 | #if defined(RETPOLINE) && !defined(MODULE) | 9 | #if defined(__noretpoline) && !defined(MODULE) |
10 | #define __noretpoline __attribute__((indirect_branch("keep"))) | 10 | #define __noinitretpoline __noretpoline |
11 | #else | 11 | #else |
12 | #define __noretpoline | 12 | #define __noinitretpoline |
13 | #endif | 13 | #endif |
14 | 14 | ||
15 | /* These macros are used to mark some functions or | 15 | /* These macros are used to mark some functions or |
@@ -47,7 +47,7 @@ | |||
47 | 47 | ||
48 | /* These are for everybody (although not all archs will actually | 48 | /* These are for everybody (although not all archs will actually |
49 | discard it in modules) */ | 49 | discard it in modules) */ |
50 | #define __init __section(.init.text) __cold __latent_entropy __noretpoline | 50 | #define __init __section(.init.text) __cold __latent_entropy __noinitretpoline |
51 | #define __initdata __section(.init.data) | 51 | #define __initdata __section(.init.data) |
52 | #define __initconst __section(.init.rodata) | 52 | #define __initconst __section(.init.rodata) |
53 | #define __exitdata __section(.exit.data) | 53 | #define __exitdata __section(.exit.data) |
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index c00c4c33e432..b26eccc78fb1 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h | |||
@@ -503,6 +503,7 @@ | |||
503 | 503 | ||
504 | #define ICH_HCR_EN (1 << 0) | 504 | #define ICH_HCR_EN (1 << 0) |
505 | #define ICH_HCR_UIE (1 << 1) | 505 | #define ICH_HCR_UIE (1 << 1) |
506 | #define ICH_HCR_NPIE (1 << 3) | ||
506 | #define ICH_HCR_TC (1 << 10) | 507 | #define ICH_HCR_TC (1 << 10) |
507 | #define ICH_HCR_TALL0 (1 << 11) | 508 | #define ICH_HCR_TALL0 (1 << 11) |
508 | #define ICH_HCR_TALL1 (1 << 12) | 509 | #define ICH_HCR_TALL1 (1 << 12) |
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h index d3453ee072fc..68d8b1f73682 100644 --- a/include/linux/irqchip/arm-gic.h +++ b/include/linux/irqchip/arm-gic.h | |||
@@ -84,6 +84,7 @@ | |||
84 | 84 | ||
85 | #define GICH_HCR_EN (1 << 0) | 85 | #define GICH_HCR_EN (1 << 0) |
86 | #define GICH_HCR_UIE (1 << 1) | 86 | #define GICH_HCR_UIE (1 << 1) |
87 | #define GICH_HCR_NPIE (1 << 3) | ||
87 | 88 | ||
88 | #define GICH_LR_VIRTUALID (0x3ff << 0) | 89 | #define GICH_LR_VIRTUALID (0x3ff << 0) |
89 | #define GICH_LR_PHYSID_CPUID_SHIFT (10) | 90 | #define GICH_LR_PHYSID_CPUID_SHIFT (10) |
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index b6a29c126cc4..2168cc6b8b30 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h | |||
@@ -151,6 +151,7 @@ extern struct jump_entry __start___jump_table[]; | |||
151 | extern struct jump_entry __stop___jump_table[]; | 151 | extern struct jump_entry __stop___jump_table[]; |
152 | 152 | ||
153 | extern void jump_label_init(void); | 153 | extern void jump_label_init(void); |
154 | extern void jump_label_invalidate_init(void); | ||
154 | extern void jump_label_lock(void); | 155 | extern void jump_label_lock(void); |
155 | extern void jump_label_unlock(void); | 156 | extern void jump_label_unlock(void); |
156 | extern void arch_jump_label_transform(struct jump_entry *entry, | 157 | extern void arch_jump_label_transform(struct jump_entry *entry, |
@@ -198,6 +199,8 @@ static __always_inline void jump_label_init(void) | |||
198 | static_key_initialized = true; | 199 | static_key_initialized = true; |
199 | } | 200 | } |
200 | 201 | ||
202 | static inline void jump_label_invalidate_init(void) {} | ||
203 | |||
201 | static __always_inline bool static_key_false(struct static_key *key) | 204 | static __always_inline bool static_key_false(struct static_key *key) |
202 | { | 205 | { |
203 | if (unlikely(static_key_count(key) > 0)) | 206 | if (unlikely(static_key_count(key) > 0)) |
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index ce51455e2adf..3fd291503576 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
@@ -472,6 +472,7 @@ extern bool parse_option_str(const char *str, const char *option); | |||
472 | extern char *next_arg(char *args, char **param, char **val); | 472 | extern char *next_arg(char *args, char **param, char **val); |
473 | 473 | ||
474 | extern int core_kernel_text(unsigned long addr); | 474 | extern int core_kernel_text(unsigned long addr); |
475 | extern int init_kernel_text(unsigned long addr); | ||
475 | extern int core_kernel_data(unsigned long addr); | 476 | extern int core_kernel_data(unsigned long addr); |
476 | extern int __kernel_text_address(unsigned long addr); | 477 | extern int __kernel_text_address(unsigned long addr); |
477 | extern int kernel_text_address(unsigned long addr); | 478 | extern int kernel_text_address(unsigned long addr); |
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index ac0062b74aed..6930c63126c7 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
@@ -1105,7 +1105,6 @@ static inline void kvm_irq_routing_update(struct kvm *kvm) | |||
1105 | { | 1105 | { |
1106 | } | 1106 | } |
1107 | #endif | 1107 | #endif |
1108 | void kvm_arch_irq_routing_update(struct kvm *kvm); | ||
1109 | 1108 | ||
1110 | static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) | 1109 | static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) |
1111 | { | 1110 | { |
@@ -1114,6 +1113,8 @@ static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) | |||
1114 | 1113 | ||
1115 | #endif /* CONFIG_HAVE_KVM_EVENTFD */ | 1114 | #endif /* CONFIG_HAVE_KVM_EVENTFD */ |
1116 | 1115 | ||
1116 | void kvm_arch_irq_routing_update(struct kvm *kvm); | ||
1117 | |||
1117 | static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) | 1118 | static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) |
1118 | { | 1119 | { |
1119 | /* | 1120 | /* |
@@ -1272,4 +1273,7 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp, | |||
1272 | } | 1273 | } |
1273 | #endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */ | 1274 | #endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */ |
1274 | 1275 | ||
1276 | void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, | ||
1277 | unsigned long start, unsigned long end); | ||
1278 | |||
1275 | #endif | 1279 | #endif |
diff --git a/include/linux/nospec.h b/include/linux/nospec.h index fbc98e2c8228..e791ebc65c9c 100644 --- a/include/linux/nospec.h +++ b/include/linux/nospec.h | |||
@@ -5,6 +5,7 @@ | |||
5 | 5 | ||
6 | #ifndef _LINUX_NOSPEC_H | 6 | #ifndef _LINUX_NOSPEC_H |
7 | #define _LINUX_NOSPEC_H | 7 | #define _LINUX_NOSPEC_H |
8 | #include <asm/barrier.h> | ||
8 | 9 | ||
9 | /** | 10 | /** |
10 | * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise | 11 | * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise |
@@ -30,26 +31,6 @@ static inline unsigned long array_index_mask_nospec(unsigned long index, | |||
30 | #endif | 31 | #endif |
31 | 32 | ||
32 | /* | 33 | /* |
33 | * Warn developers about inappropriate array_index_nospec() usage. | ||
34 | * | ||
35 | * Even if the CPU speculates past the WARN_ONCE branch, the | ||
36 | * sign bit of @index is taken into account when generating the | ||
37 | * mask. | ||
38 | * | ||
39 | * This warning is compiled out when the compiler can infer that | ||
40 | * @index and @size are less than LONG_MAX. | ||
41 | */ | ||
42 | #define array_index_mask_nospec_check(index, size) \ | ||
43 | ({ \ | ||
44 | if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX, \ | ||
45 | "array_index_nospec() limited to range of [0, LONG_MAX]\n")) \ | ||
46 | _mask = 0; \ | ||
47 | else \ | ||
48 | _mask = array_index_mask_nospec(index, size); \ | ||
49 | _mask; \ | ||
50 | }) | ||
51 | |||
52 | /* | ||
53 | * array_index_nospec - sanitize an array index after a bounds check | 34 | * array_index_nospec - sanitize an array index after a bounds check |
54 | * | 35 | * |
55 | * For a code sequence like: | 36 | * For a code sequence like: |
@@ -67,12 +48,11 @@ static inline unsigned long array_index_mask_nospec(unsigned long index, | |||
67 | ({ \ | 48 | ({ \ |
68 | typeof(index) _i = (index); \ | 49 | typeof(index) _i = (index); \ |
69 | typeof(size) _s = (size); \ | 50 | typeof(size) _s = (size); \ |
70 | unsigned long _mask = array_index_mask_nospec_check(_i, _s); \ | 51 | unsigned long _mask = array_index_mask_nospec(_i, _s); \ |
71 | \ | 52 | \ |
72 | BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \ | 53 | BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \ |
73 | BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \ | 54 | BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \ |
74 | \ | 55 | \ |
75 | _i &= _mask; \ | 56 | (typeof(_i)) (_i & _mask); \ |
76 | _i; \ | ||
77 | }) | 57 | }) |
78 | #endif /* _LINUX_NOSPEC_H */ | 58 | #endif /* _LINUX_NOSPEC_H */ |
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h index 88865e0ebf4d..091033a6b836 100644 --- a/include/linux/of_pci.h +++ b/include/linux/of_pci.h | |||
@@ -13,7 +13,6 @@ struct device_node; | |||
13 | struct device_node *of_pci_find_child_device(struct device_node *parent, | 13 | struct device_node *of_pci_find_child_device(struct device_node *parent, |
14 | unsigned int devfn); | 14 | unsigned int devfn); |
15 | int of_pci_get_devfn(struct device_node *np); | 15 | int of_pci_get_devfn(struct device_node *np); |
16 | int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin); | ||
17 | int of_pci_parse_bus_range(struct device_node *node, struct resource *res); | 16 | int of_pci_parse_bus_range(struct device_node *node, struct resource *res); |
18 | int of_get_pci_domain_nr(struct device_node *node); | 17 | int of_get_pci_domain_nr(struct device_node *node); |
19 | int of_pci_get_max_link_speed(struct device_node *node); | 18 | int of_pci_get_max_link_speed(struct device_node *node); |
@@ -34,12 +33,6 @@ static inline int of_pci_get_devfn(struct device_node *np) | |||
34 | } | 33 | } |
35 | 34 | ||
36 | static inline int | 35 | static inline int |
37 | of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin) | ||
38 | { | ||
39 | return 0; | ||
40 | } | ||
41 | |||
42 | static inline int | ||
43 | of_pci_parse_bus_range(struct device_node *node, struct resource *res) | 36 | of_pci_parse_bus_range(struct device_node *node, struct resource *res) |
44 | { | 37 | { |
45 | return -EINVAL; | 38 | return -EINVAL; |
@@ -67,6 +60,16 @@ of_pci_get_max_link_speed(struct device_node *node) | |||
67 | static inline void of_pci_check_probe_only(void) { } | 60 | static inline void of_pci_check_probe_only(void) { } |
68 | #endif | 61 | #endif |
69 | 62 | ||
63 | #if IS_ENABLED(CONFIG_OF_IRQ) | ||
64 | int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin); | ||
65 | #else | ||
66 | static inline int | ||
67 | of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin) | ||
68 | { | ||
69 | return 0; | ||
70 | } | ||
71 | #endif | ||
72 | |||
70 | #if defined(CONFIG_OF_ADDRESS) | 73 | #if defined(CONFIG_OF_ADDRESS) |
71 | int of_pci_get_host_bridge_resources(struct device_node *dev, | 74 | int of_pci_get_host_bridge_resources(struct device_node *dev, |
72 | unsigned char busno, unsigned char bus_max, | 75 | unsigned char busno, unsigned char bus_max, |
diff --git a/include/linux/phy.h b/include/linux/phy.h index 5a0c3e53e7c2..d7069539f351 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h | |||
@@ -924,6 +924,7 @@ void phy_device_remove(struct phy_device *phydev); | |||
924 | int phy_init_hw(struct phy_device *phydev); | 924 | int phy_init_hw(struct phy_device *phydev); |
925 | int phy_suspend(struct phy_device *phydev); | 925 | int phy_suspend(struct phy_device *phydev); |
926 | int phy_resume(struct phy_device *phydev); | 926 | int phy_resume(struct phy_device *phydev); |
927 | int __phy_resume(struct phy_device *phydev); | ||
927 | int phy_loopback(struct phy_device *phydev, bool enable); | 928 | int phy_loopback(struct phy_device *phydev, bool enable); |
928 | struct phy_device *phy_attach(struct net_device *dev, const char *bus_id, | 929 | struct phy_device *phy_attach(struct net_device *dev, const char *bus_id, |
929 | phy_interface_t interface); | 930 | phy_interface_t interface); |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index c1e66bdcf583..ddf77cf4ff2d 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -3285,8 +3285,7 @@ int skb_zerocopy(struct sk_buff *to, struct sk_buff *from, | |||
3285 | void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len); | 3285 | void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len); |
3286 | int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen); | 3286 | int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen); |
3287 | void skb_scrub_packet(struct sk_buff *skb, bool xnet); | 3287 | void skb_scrub_packet(struct sk_buff *skb, bool xnet); |
3288 | unsigned int skb_gso_transport_seglen(const struct sk_buff *skb); | 3288 | bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu); |
3289 | bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu); | ||
3290 | bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len); | 3289 | bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len); |
3291 | struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); | 3290 | struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); |
3292 | struct sk_buff *skb_vlan_untag(struct sk_buff *skb); | 3291 | struct sk_buff *skb_vlan_untag(struct sk_buff *skb); |
@@ -4104,38 +4103,6 @@ static inline bool skb_head_is_locked(const struct sk_buff *skb) | |||
4104 | return !skb->head_frag || skb_cloned(skb); | 4103 | return !skb->head_frag || skb_cloned(skb); |
4105 | } | 4104 | } |
4106 | 4105 | ||
4107 | /** | ||
4108 | * skb_gso_network_seglen - Return length of individual segments of a gso packet | ||
4109 | * | ||
4110 | * @skb: GSO skb | ||
4111 | * | ||
4112 | * skb_gso_network_seglen is used to determine the real size of the | ||
4113 | * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP). | ||
4114 | * | ||
4115 | * The MAC/L2 header is not accounted for. | ||
4116 | */ | ||
4117 | static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb) | ||
4118 | { | ||
4119 | unsigned int hdr_len = skb_transport_header(skb) - | ||
4120 | skb_network_header(skb); | ||
4121 | return hdr_len + skb_gso_transport_seglen(skb); | ||
4122 | } | ||
4123 | |||
4124 | /** | ||
4125 | * skb_gso_mac_seglen - Return length of individual segments of a gso packet | ||
4126 | * | ||
4127 | * @skb: GSO skb | ||
4128 | * | ||
4129 | * skb_gso_mac_seglen is used to determine the real size of the | ||
4130 | * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4 | ||
4131 | * headers (TCP/UDP). | ||
4132 | */ | ||
4133 | static inline unsigned int skb_gso_mac_seglen(const struct sk_buff *skb) | ||
4134 | { | ||
4135 | unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb); | ||
4136 | return hdr_len + skb_gso_transport_seglen(skb); | ||
4137 | } | ||
4138 | |||
4139 | /* Local Checksum Offload. | 4106 | /* Local Checksum Offload. |
4140 | * Compute outer checksum based on the assumption that the | 4107 | * Compute outer checksum based on the assumption that the |
4141 | * inner checksum will be offloaded later. | 4108 | * inner checksum will be offloaded later. |
diff --git a/include/linux/tty.h b/include/linux/tty.h index 0a6c71e0ad01..47f8af22f216 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h | |||
@@ -364,6 +364,7 @@ struct tty_file_private { | |||
364 | #define TTY_PTY_LOCK 16 /* pty private */ | 364 | #define TTY_PTY_LOCK 16 /* pty private */ |
365 | #define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */ | 365 | #define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */ |
366 | #define TTY_HUPPED 18 /* Post driver->hangup() */ | 366 | #define TTY_HUPPED 18 /* Post driver->hangup() */ |
367 | #define TTY_HUPPING 19 /* Hangup in progress */ | ||
367 | #define TTY_LDISC_HALTED 22 /* Line discipline is halted */ | 368 | #define TTY_LDISC_HALTED 22 /* Line discipline is halted */ |
368 | 369 | ||
369 | /* Values for tty->flow_change */ | 370 | /* Values for tty->flow_change */ |
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h index f1fcec2fd5f8..b7a99ce56bc9 100644 --- a/include/linux/usb/quirks.h +++ b/include/linux/usb/quirks.h | |||
@@ -63,4 +63,7 @@ | |||
63 | */ | 63 | */ |
64 | #define USB_QUIRK_DISCONNECT_SUSPEND BIT(12) | 64 | #define USB_QUIRK_DISCONNECT_SUSPEND BIT(12) |
65 | 65 | ||
66 | /* Device needs a pause after every control message. */ | ||
67 | #define USB_QUIRK_DELAY_CTRL_MSG BIT(13) | ||
68 | |||
66 | #endif /* __LINUX_USB_QUIRKS_H */ | 69 | #endif /* __LINUX_USB_QUIRKS_H */ |
diff --git a/include/media/demux.h b/include/media/demux.h index c4df6cee48e6..bf00a5a41a90 100644 --- a/include/media/demux.h +++ b/include/media/demux.h | |||
@@ -117,7 +117,7 @@ struct dmx_ts_feed { | |||
117 | * specified by @filter_value that will be used on the filter | 117 | * specified by @filter_value that will be used on the filter |
118 | * match logic. | 118 | * match logic. |
119 | * @filter_mode: Contains a 16 bytes (128 bits) filter mode. | 119 | * @filter_mode: Contains a 16 bytes (128 bits) filter mode. |
120 | * @parent: Pointer to struct dmx_section_feed. | 120 | * @parent: Back-pointer to struct dmx_section_feed. |
121 | * @priv: Pointer to private data of the API client. | 121 | * @priv: Pointer to private data of the API client. |
122 | * | 122 | * |
123 | * | 123 | * |
@@ -130,8 +130,9 @@ struct dmx_section_filter { | |||
130 | u8 filter_value[DMX_MAX_FILTER_SIZE]; | 130 | u8 filter_value[DMX_MAX_FILTER_SIZE]; |
131 | u8 filter_mask[DMX_MAX_FILTER_SIZE]; | 131 | u8 filter_mask[DMX_MAX_FILTER_SIZE]; |
132 | u8 filter_mode[DMX_MAX_FILTER_SIZE]; | 132 | u8 filter_mode[DMX_MAX_FILTER_SIZE]; |
133 | struct dmx_section_feed *parent; /* Back-pointer */ | 133 | struct dmx_section_feed *parent; |
134 | void *priv; /* Pointer to private data of the API client */ | 134 | |
135 | void *priv; | ||
135 | }; | 136 | }; |
136 | 137 | ||
137 | /** | 138 | /** |
@@ -193,6 +194,10 @@ struct dmx_section_feed { | |||
193 | * @buffer2: Pointer to the tail of the filtered TS packets, or NULL. | 194 | * @buffer2: Pointer to the tail of the filtered TS packets, or NULL. |
194 | * @buffer2_length: Length of the TS data in buffer2. | 195 | * @buffer2_length: Length of the TS data in buffer2. |
195 | * @source: Indicates which TS feed is the source of the callback. | 196 | * @source: Indicates which TS feed is the source of the callback. |
197 | * @buffer_flags: Address where buffer flags are stored. Those are | ||
198 | * used to report discontinuity users via DVB | ||
199 | * memory mapped API, as defined by | ||
200 | * &enum dmx_buffer_flags. | ||
196 | * | 201 | * |
197 | * This function callback prototype, provided by the client of the demux API, | 202 | * This function callback prototype, provided by the client of the demux API, |
198 | * is called from the demux code. The function is only called when filtering | 203 | * is called from the demux code. The function is only called when filtering |
@@ -245,7 +250,8 @@ typedef int (*dmx_ts_cb)(const u8 *buffer1, | |||
245 | size_t buffer1_length, | 250 | size_t buffer1_length, |
246 | const u8 *buffer2, | 251 | const u8 *buffer2, |
247 | size_t buffer2_length, | 252 | size_t buffer2_length, |
248 | struct dmx_ts_feed *source); | 253 | struct dmx_ts_feed *source, |
254 | u32 *buffer_flags); | ||
249 | 255 | ||
250 | /** | 256 | /** |
251 | * typedef dmx_section_cb - DVB demux TS filter callback function prototype | 257 | * typedef dmx_section_cb - DVB demux TS filter callback function prototype |
@@ -261,6 +267,10 @@ typedef int (*dmx_ts_cb)(const u8 *buffer1, | |||
261 | * including headers and CRC. | 267 | * including headers and CRC. |
262 | * @source: Indicates which section feed is the source of the | 268 | * @source: Indicates which section feed is the source of the |
263 | * callback. | 269 | * callback. |
270 | * @buffer_flags: Address where buffer flags are stored. Those are | ||
271 | * used to report discontinuity users via DVB | ||
272 | * memory mapped API, as defined by | ||
273 | * &enum dmx_buffer_flags. | ||
264 | * | 274 | * |
265 | * This function callback prototype, provided by the client of the demux API, | 275 | * This function callback prototype, provided by the client of the demux API, |
266 | * is called from the demux code. The function is only called when | 276 | * is called from the demux code. The function is only called when |
@@ -286,7 +296,8 @@ typedef int (*dmx_section_cb)(const u8 *buffer1, | |||
286 | size_t buffer1_len, | 296 | size_t buffer1_len, |
287 | const u8 *buffer2, | 297 | const u8 *buffer2, |
288 | size_t buffer2_len, | 298 | size_t buffer2_len, |
289 | struct dmx_section_filter *source); | 299 | struct dmx_section_filter *source, |
300 | u32 *buffer_flags); | ||
290 | 301 | ||
291 | /* | 302 | /* |
292 | * DVB Front-End | 303 | * DVB Front-End |
diff --git a/include/media/dmxdev.h b/include/media/dmxdev.h index 2f5cb2c7b6a7..baafa3b8aca4 100644 --- a/include/media/dmxdev.h +++ b/include/media/dmxdev.h | |||
@@ -163,6 +163,7 @@ struct dmxdev_filter { | |||
163 | * @demux: pointer to &struct dmx_demux. | 163 | * @demux: pointer to &struct dmx_demux. |
164 | * @filternum: number of filters. | 164 | * @filternum: number of filters. |
165 | * @capabilities: demux capabilities as defined by &enum dmx_demux_caps. | 165 | * @capabilities: demux capabilities as defined by &enum dmx_demux_caps. |
166 | * @may_do_mmap: flag used to indicate if the device may do mmap. | ||
166 | * @exit: flag to indicate that the demux is being released. | 167 | * @exit: flag to indicate that the demux is being released. |
167 | * @dvr_orig_fe: pointer to &struct dmx_frontend. | 168 | * @dvr_orig_fe: pointer to &struct dmx_frontend. |
168 | * @dvr_buffer: embedded &struct dvb_ringbuffer for DVB output. | 169 | * @dvr_buffer: embedded &struct dvb_ringbuffer for DVB output. |
@@ -180,6 +181,7 @@ struct dmxdev { | |||
180 | int filternum; | 181 | int filternum; |
181 | int capabilities; | 182 | int capabilities; |
182 | 183 | ||
184 | unsigned int may_do_mmap:1; | ||
183 | unsigned int exit:1; | 185 | unsigned int exit:1; |
184 | #define DMXDEV_CAP_DUPLEX 1 | 186 | #define DMXDEV_CAP_DUPLEX 1 |
185 | struct dmx_frontend *dvr_orig_fe; | 187 | struct dmx_frontend *dvr_orig_fe; |
diff --git a/include/media/dvb_demux.h b/include/media/dvb_demux.h index b07092038f4b..3b6aeca7a49e 100644 --- a/include/media/dvb_demux.h +++ b/include/media/dvb_demux.h | |||
@@ -115,6 +115,8 @@ struct dvb_demux_filter { | |||
115 | * @pid: PID to be filtered. | 115 | * @pid: PID to be filtered. |
116 | * @timeout: feed timeout. | 116 | * @timeout: feed timeout. |
117 | * @filter: pointer to &struct dvb_demux_filter. | 117 | * @filter: pointer to &struct dvb_demux_filter. |
118 | * @buffer_flags: Buffer flags used to report discontinuity users via DVB | ||
119 | * memory mapped API, as defined by &enum dmx_buffer_flags. | ||
118 | * @ts_type: type of TS, as defined by &enum ts_filter_type. | 120 | * @ts_type: type of TS, as defined by &enum ts_filter_type. |
119 | * @pes_type: type of PES, as defined by &enum dmx_ts_pes. | 121 | * @pes_type: type of PES, as defined by &enum dmx_ts_pes. |
120 | * @cc: MPEG-TS packet continuity counter | 122 | * @cc: MPEG-TS packet continuity counter |
@@ -145,6 +147,8 @@ struct dvb_demux_feed { | |||
145 | ktime_t timeout; | 147 | ktime_t timeout; |
146 | struct dvb_demux_filter *filter; | 148 | struct dvb_demux_filter *filter; |
147 | 149 | ||
150 | u32 buffer_flags; | ||
151 | |||
148 | enum ts_filter_type ts_type; | 152 | enum ts_filter_type ts_type; |
149 | enum dmx_ts_pes pes_type; | 153 | enum dmx_ts_pes pes_type; |
150 | 154 | ||
diff --git a/include/media/dvb_vb2.h b/include/media/dvb_vb2.h index 01d1202d1a55..8cb88452cd6c 100644 --- a/include/media/dvb_vb2.h +++ b/include/media/dvb_vb2.h | |||
@@ -85,6 +85,12 @@ struct dvb_buffer { | |||
85 | * @nonblocking: | 85 | * @nonblocking: |
86 | * If different than zero, device is operating on non-blocking | 86 | * If different than zero, device is operating on non-blocking |
87 | * mode. | 87 | * mode. |
88 | * @flags: buffer flags as defined by &enum dmx_buffer_flags. | ||
89 | * Filled only at &DMX_DQBUF. &DMX_QBUF should zero this field. | ||
90 | * @count: monotonic counter for filled buffers. Helps to identify | ||
91 | * data stream loses. Filled only at &DMX_DQBUF. &DMX_QBUF should | ||
92 | * zero this field. | ||
93 | * | ||
88 | * @name: name of the device type. Currently, it can either be | 94 | * @name: name of the device type. Currently, it can either be |
89 | * "dvr" or "demux_filter". | 95 | * "dvr" or "demux_filter". |
90 | */ | 96 | */ |
@@ -100,10 +106,14 @@ struct dvb_vb2_ctx { | |||
100 | int buf_siz; | 106 | int buf_siz; |
101 | int buf_cnt; | 107 | int buf_cnt; |
102 | int nonblocking; | 108 | int nonblocking; |
109 | |||
110 | enum dmx_buffer_flags flags; | ||
111 | u32 count; | ||
112 | |||
103 | char name[DVB_VB2_NAME_MAX + 1]; | 113 | char name[DVB_VB2_NAME_MAX + 1]; |
104 | }; | 114 | }; |
105 | 115 | ||
106 | #ifndef DVB_MMAP | 116 | #ifndef CONFIG_DVB_MMAP |
107 | static inline int dvb_vb2_init(struct dvb_vb2_ctx *ctx, | 117 | static inline int dvb_vb2_init(struct dvb_vb2_ctx *ctx, |
108 | const char *name, int non_blocking) | 118 | const char *name, int non_blocking) |
109 | { | 119 | { |
@@ -114,7 +124,7 @@ static inline int dvb_vb2_release(struct dvb_vb2_ctx *ctx) | |||
114 | return 0; | 124 | return 0; |
115 | }; | 125 | }; |
116 | #define dvb_vb2_is_streaming(ctx) (0) | 126 | #define dvb_vb2_is_streaming(ctx) (0) |
117 | #define dvb_vb2_fill_buffer(ctx, file, wait) (0) | 127 | #define dvb_vb2_fill_buffer(ctx, file, wait, flags) (0) |
118 | 128 | ||
119 | static inline __poll_t dvb_vb2_poll(struct dvb_vb2_ctx *ctx, | 129 | static inline __poll_t dvb_vb2_poll(struct dvb_vb2_ctx *ctx, |
120 | struct file *file, | 130 | struct file *file, |
@@ -153,9 +163,13 @@ int dvb_vb2_is_streaming(struct dvb_vb2_ctx *ctx); | |||
153 | * @ctx: control struct for VB2 handler | 163 | * @ctx: control struct for VB2 handler |
154 | * @src: place where the data is stored | 164 | * @src: place where the data is stored |
155 | * @len: number of bytes to be copied from @src | 165 | * @len: number of bytes to be copied from @src |
166 | * @buffer_flags: | ||
167 | * pointer to buffer flags as defined by &enum dmx_buffer_flags. | ||
168 | * can be NULL. | ||
156 | */ | 169 | */ |
157 | int dvb_vb2_fill_buffer(struct dvb_vb2_ctx *ctx, | 170 | int dvb_vb2_fill_buffer(struct dvb_vb2_ctx *ctx, |
158 | const unsigned char *src, int len); | 171 | const unsigned char *src, int len, |
172 | enum dmx_buffer_flags *buffer_flags); | ||
159 | 173 | ||
160 | /** | 174 | /** |
161 | * dvb_vb2_poll - Wrapper to vb2_core_streamon() for Digital TV | 175 | * dvb_vb2_poll - Wrapper to vb2_core_streamon() for Digital TV |
diff --git a/include/net/devlink.h b/include/net/devlink.h index 6545b03e97f7..4de35ed12bcc 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h | |||
@@ -257,6 +257,18 @@ struct devlink_resource_size_params { | |||
257 | enum devlink_resource_unit unit; | 257 | enum devlink_resource_unit unit; |
258 | }; | 258 | }; |
259 | 259 | ||
260 | static inline void | ||
261 | devlink_resource_size_params_init(struct devlink_resource_size_params *size_params, | ||
262 | u64 size_min, u64 size_max, | ||
263 | u64 size_granularity, | ||
264 | enum devlink_resource_unit unit) | ||
265 | { | ||
266 | size_params->size_min = size_min; | ||
267 | size_params->size_max = size_max; | ||
268 | size_params->size_granularity = size_granularity; | ||
269 | size_params->unit = unit; | ||
270 | } | ||
271 | |||
260 | /** | 272 | /** |
261 | * struct devlink_resource - devlink resource | 273 | * struct devlink_resource - devlink resource |
262 | * @name: name of the resource | 274 | * @name: name of the resource |
@@ -278,7 +290,7 @@ struct devlink_resource { | |||
278 | u64 size_new; | 290 | u64 size_new; |
279 | bool size_valid; | 291 | bool size_valid; |
280 | struct devlink_resource *parent; | 292 | struct devlink_resource *parent; |
281 | struct devlink_resource_size_params *size_params; | 293 | struct devlink_resource_size_params size_params; |
282 | struct list_head list; | 294 | struct list_head list; |
283 | struct list_head resource_list; | 295 | struct list_head resource_list; |
284 | const struct devlink_resource_ops *resource_ops; | 296 | const struct devlink_resource_ops *resource_ops; |
@@ -402,7 +414,7 @@ int devlink_resource_register(struct devlink *devlink, | |||
402 | u64 resource_size, | 414 | u64 resource_size, |
403 | u64 resource_id, | 415 | u64 resource_id, |
404 | u64 parent_resource_id, | 416 | u64 parent_resource_id, |
405 | struct devlink_resource_size_params *size_params, | 417 | const struct devlink_resource_size_params *size_params, |
406 | const struct devlink_resource_ops *resource_ops); | 418 | const struct devlink_resource_ops *resource_ops); |
407 | void devlink_resources_unregister(struct devlink *devlink, | 419 | void devlink_resources_unregister(struct devlink *devlink, |
408 | struct devlink_resource *resource); | 420 | struct devlink_resource *resource); |
@@ -556,7 +568,7 @@ devlink_resource_register(struct devlink *devlink, | |||
556 | u64 resource_size, | 568 | u64 resource_size, |
557 | u64 resource_id, | 569 | u64 resource_id, |
558 | u64 parent_resource_id, | 570 | u64 parent_resource_id, |
559 | struct devlink_resource_size_params *size_params, | 571 | const struct devlink_resource_size_params *size_params, |
560 | const struct devlink_resource_ops *resource_ops) | 572 | const struct devlink_resource_ops *resource_ops) |
561 | { | 573 | { |
562 | return 0; | 574 | return 0; |
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index d8d4a902a88d..2280b2351739 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h | |||
@@ -68,6 +68,9 @@ struct scsi_cmnd { | |||
68 | struct list_head list; /* scsi_cmnd participates in queue lists */ | 68 | struct list_head list; /* scsi_cmnd participates in queue lists */ |
69 | struct list_head eh_entry; /* entry for the host eh_cmd_q */ | 69 | struct list_head eh_entry; /* entry for the host eh_cmd_q */ |
70 | struct delayed_work abort_work; | 70 | struct delayed_work abort_work; |
71 | |||
72 | struct rcu_head rcu; | ||
73 | |||
71 | int eh_eflags; /* Used by error handlr */ | 74 | int eh_eflags; /* Used by error handlr */ |
72 | 75 | ||
73 | /* | 76 | /* |
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index 1a1df0d21ee3..a8b7bf879ced 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h | |||
@@ -571,8 +571,6 @@ struct Scsi_Host { | |||
571 | struct blk_mq_tag_set tag_set; | 571 | struct blk_mq_tag_set tag_set; |
572 | }; | 572 | }; |
573 | 573 | ||
574 | struct rcu_head rcu; | ||
575 | |||
576 | atomic_t host_busy; /* commands actually active on low-level */ | 574 | atomic_t host_busy; /* commands actually active on low-level */ |
577 | atomic_t host_blocked; | 575 | atomic_t host_blocked; |
578 | 576 | ||
diff --git a/include/soc/arc/mcip.h b/include/soc/arc/mcip.h index c2d1b15da136..a91f25151a5b 100644 --- a/include/soc/arc/mcip.h +++ b/include/soc/arc/mcip.h | |||
@@ -15,6 +15,7 @@ | |||
15 | 15 | ||
16 | #define ARC_REG_MCIP_BCR 0x0d0 | 16 | #define ARC_REG_MCIP_BCR 0x0d0 |
17 | #define ARC_REG_MCIP_IDU_BCR 0x0D5 | 17 | #define ARC_REG_MCIP_IDU_BCR 0x0D5 |
18 | #define ARC_REG_GFRC_BUILD 0x0D6 | ||
18 | #define ARC_REG_MCIP_CMD 0x600 | 19 | #define ARC_REG_MCIP_CMD 0x600 |
19 | #define ARC_REG_MCIP_WDATA 0x601 | 20 | #define ARC_REG_MCIP_WDATA 0x601 |
20 | #define ARC_REG_MCIP_READBACK 0x602 | 21 | #define ARC_REG_MCIP_READBACK 0x602 |
@@ -36,10 +37,14 @@ struct mcip_cmd { | |||
36 | #define CMD_SEMA_RELEASE 0x12 | 37 | #define CMD_SEMA_RELEASE 0x12 |
37 | 38 | ||
38 | #define CMD_DEBUG_SET_MASK 0x34 | 39 | #define CMD_DEBUG_SET_MASK 0x34 |
40 | #define CMD_DEBUG_READ_MASK 0x35 | ||
39 | #define CMD_DEBUG_SET_SELECT 0x36 | 41 | #define CMD_DEBUG_SET_SELECT 0x36 |
42 | #define CMD_DEBUG_READ_SELECT 0x37 | ||
40 | 43 | ||
41 | #define CMD_GFRC_READ_LO 0x42 | 44 | #define CMD_GFRC_READ_LO 0x42 |
42 | #define CMD_GFRC_READ_HI 0x43 | 45 | #define CMD_GFRC_READ_HI 0x43 |
46 | #define CMD_GFRC_SET_CORE 0x47 | ||
47 | #define CMD_GFRC_READ_CORE 0x48 | ||
43 | 48 | ||
44 | #define CMD_IDU_ENABLE 0x71 | 49 | #define CMD_IDU_ENABLE 0x71 |
45 | #define CMD_IDU_DISABLE 0x72 | 50 | #define CMD_IDU_DISABLE 0x72 |
diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h index 85dc965afd89..99c902e460c2 100644 --- a/include/uapi/asm-generic/siginfo.h +++ b/include/uapi/asm-generic/siginfo.h | |||
@@ -102,13 +102,13 @@ typedef struct siginfo { | |||
102 | short _addr_lsb; /* LSB of the reported address */ | 102 | short _addr_lsb; /* LSB of the reported address */ |
103 | /* used when si_code=SEGV_BNDERR */ | 103 | /* used when si_code=SEGV_BNDERR */ |
104 | struct { | 104 | struct { |
105 | short _dummy_bnd; | 105 | void *_dummy_bnd; |
106 | void __user *_lower; | 106 | void __user *_lower; |
107 | void __user *_upper; | 107 | void __user *_upper; |
108 | } _addr_bnd; | 108 | } _addr_bnd; |
109 | /* used when si_code=SEGV_PKUERR */ | 109 | /* used when si_code=SEGV_PKUERR */ |
110 | struct { | 110 | struct { |
111 | short _dummy_pkey; | 111 | void *_dummy_pkey; |
112 | __u32 _pkey; | 112 | __u32 _pkey; |
113 | } _addr_pkey; | 113 | } _addr_pkey; |
114 | }; | 114 | }; |
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h index 91a31ffed828..9a781f0611df 100644 --- a/include/uapi/drm/virtgpu_drm.h +++ b/include/uapi/drm/virtgpu_drm.h | |||
@@ -63,6 +63,7 @@ struct drm_virtgpu_execbuffer { | |||
63 | }; | 63 | }; |
64 | 64 | ||
65 | #define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */ | 65 | #define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */ |
66 | #define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */ | ||
66 | 67 | ||
67 | struct drm_virtgpu_getparam { | 68 | struct drm_virtgpu_getparam { |
68 | __u64 param; | 69 | __u64 param; |
diff --git a/include/uapi/linux/blktrace_api.h b/include/uapi/linux/blktrace_api.h index 20d1490d6377..3c50e07ee833 100644 --- a/include/uapi/linux/blktrace_api.h +++ b/include/uapi/linux/blktrace_api.h | |||
@@ -131,7 +131,7 @@ enum { | |||
131 | #define BLKTRACE_BDEV_SIZE 32 | 131 | #define BLKTRACE_BDEV_SIZE 32 |
132 | 132 | ||
133 | /* | 133 | /* |
134 | * User setup structure passed with BLKTRACESTART | 134 | * User setup structure passed with BLKTRACESETUP |
135 | */ | 135 | */ |
136 | struct blk_user_trace_setup { | 136 | struct blk_user_trace_setup { |
137 | char name[BLKTRACE_BDEV_SIZE]; /* output */ | 137 | char name[BLKTRACE_BDEV_SIZE]; /* output */ |
diff --git a/include/uapi/linux/dvb/dmx.h b/include/uapi/linux/dvb/dmx.h index 5f3c5a918f00..b4112f0b6dd3 100644 --- a/include/uapi/linux/dvb/dmx.h +++ b/include/uapi/linux/dvb/dmx.h | |||
@@ -212,6 +212,32 @@ struct dmx_stc { | |||
212 | }; | 212 | }; |
213 | 213 | ||
214 | /** | 214 | /** |
215 | * enum dmx_buffer_flags - DMX memory-mapped buffer flags | ||
216 | * | ||
217 | * @DMX_BUFFER_FLAG_HAD_CRC32_DISCARD: | ||
218 | * Indicates that the Kernel discarded one or more frames due to wrong | ||
219 | * CRC32 checksum. | ||
220 | * @DMX_BUFFER_FLAG_TEI: | ||
221 | * Indicates that the Kernel has detected a Transport Error indicator | ||
222 | * (TEI) on a filtered pid. | ||
223 | * @DMX_BUFFER_PKT_COUNTER_MISMATCH: | ||
224 | * Indicates that the Kernel has detected a packet counter mismatch | ||
225 | * on a filtered pid. | ||
226 | * @DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED: | ||
227 | * Indicates that the Kernel has detected one or more frame discontinuity. | ||
228 | * @DMX_BUFFER_FLAG_DISCONTINUITY_INDICATOR: | ||
229 | * Received at least one packet with a frame discontinuity indicator. | ||
230 | */ | ||
231 | |||
232 | enum dmx_buffer_flags { | ||
233 | DMX_BUFFER_FLAG_HAD_CRC32_DISCARD = 1 << 0, | ||
234 | DMX_BUFFER_FLAG_TEI = 1 << 1, | ||
235 | DMX_BUFFER_PKT_COUNTER_MISMATCH = 1 << 2, | ||
236 | DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED = 1 << 3, | ||
237 | DMX_BUFFER_FLAG_DISCONTINUITY_INDICATOR = 1 << 4, | ||
238 | }; | ||
239 | |||
240 | /** | ||
215 | * struct dmx_buffer - dmx buffer info | 241 | * struct dmx_buffer - dmx buffer info |
216 | * | 242 | * |
217 | * @index: id number of the buffer | 243 | * @index: id number of the buffer |
@@ -220,15 +246,24 @@ struct dmx_stc { | |||
220 | * offset from the start of the device memory for this plane, | 246 | * offset from the start of the device memory for this plane, |
221 | * (or a "cookie" that should be passed to mmap() as offset) | 247 | * (or a "cookie" that should be passed to mmap() as offset) |
222 | * @length: size in bytes of the buffer | 248 | * @length: size in bytes of the buffer |
249 | * @flags: bit array of buffer flags as defined by &enum dmx_buffer_flags. | ||
250 | * Filled only at &DMX_DQBUF. | ||
251 | * @count: monotonic counter for filled buffers. Helps to identify | ||
252 | * data stream loses. Filled only at &DMX_DQBUF. | ||
223 | * | 253 | * |
224 | * Contains data exchanged by application and driver using one of the streaming | 254 | * Contains data exchanged by application and driver using one of the streaming |
225 | * I/O methods. | 255 | * I/O methods. |
256 | * | ||
257 | * Please notice that, for &DMX_QBUF, only @index should be filled. | ||
258 | * On &DMX_DQBUF calls, all fields will be filled by the Kernel. | ||
226 | */ | 259 | */ |
227 | struct dmx_buffer { | 260 | struct dmx_buffer { |
228 | __u32 index; | 261 | __u32 index; |
229 | __u32 bytesused; | 262 | __u32 bytesused; |
230 | __u32 offset; | 263 | __u32 offset; |
231 | __u32 length; | 264 | __u32 length; |
265 | __u32 flags; | ||
266 | __u32 count; | ||
232 | }; | 267 | }; |
233 | 268 | ||
234 | /** | 269 | /** |
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 0fb5ef939732..7b26d4b0b052 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h | |||
@@ -761,6 +761,7 @@ struct kvm_ppc_resize_hpt { | |||
761 | #define KVM_TRACE_PAUSE __KVM_DEPRECATED_MAIN_0x07 | 761 | #define KVM_TRACE_PAUSE __KVM_DEPRECATED_MAIN_0x07 |
762 | #define KVM_TRACE_DISABLE __KVM_DEPRECATED_MAIN_0x08 | 762 | #define KVM_TRACE_DISABLE __KVM_DEPRECATED_MAIN_0x08 |
763 | #define KVM_GET_EMULATED_CPUID _IOWR(KVMIO, 0x09, struct kvm_cpuid2) | 763 | #define KVM_GET_EMULATED_CPUID _IOWR(KVMIO, 0x09, struct kvm_cpuid2) |
764 | #define KVM_GET_MSR_FEATURE_INDEX_LIST _IOWR(KVMIO, 0x0a, struct kvm_msr_list) | ||
764 | 765 | ||
765 | /* | 766 | /* |
766 | * Extension capability list. | 767 | * Extension capability list. |
@@ -934,6 +935,7 @@ struct kvm_ppc_resize_hpt { | |||
934 | #define KVM_CAP_S390_AIS_MIGRATION 150 | 935 | #define KVM_CAP_S390_AIS_MIGRATION 150 |
935 | #define KVM_CAP_PPC_GET_CPU_CHAR 151 | 936 | #define KVM_CAP_PPC_GET_CPU_CHAR 151 |
936 | #define KVM_CAP_S390_BPB 152 | 937 | #define KVM_CAP_S390_BPB 152 |
938 | #define KVM_CAP_GET_MSR_FEATURES 153 | ||
937 | 939 | ||
938 | #ifdef KVM_CAP_IRQ_ROUTING | 940 | #ifdef KVM_CAP_IRQ_ROUTING |
939 | 941 | ||
diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h index 3d77fe91239a..9008f31c7eb6 100644 --- a/include/uapi/linux/psp-sev.h +++ b/include/uapi/linux/psp-sev.h | |||
@@ -42,7 +42,7 @@ typedef enum { | |||
42 | SEV_RET_INVALID_PLATFORM_STATE, | 42 | SEV_RET_INVALID_PLATFORM_STATE, |
43 | SEV_RET_INVALID_GUEST_STATE, | 43 | SEV_RET_INVALID_GUEST_STATE, |
44 | SEV_RET_INAVLID_CONFIG, | 44 | SEV_RET_INAVLID_CONFIG, |
45 | SEV_RET_INVALID_len, | 45 | SEV_RET_INVALID_LEN, |
46 | SEV_RET_ALREADY_OWNED, | 46 | SEV_RET_ALREADY_OWNED, |
47 | SEV_RET_INVALID_CERTIFICATE, | 47 | SEV_RET_INVALID_CERTIFICATE, |
48 | SEV_RET_POLICY_FAILURE, | 48 | SEV_RET_POLICY_FAILURE, |
diff --git a/include/uapi/misc/ocxl.h b/include/uapi/misc/ocxl.h index 4b0b0b756f3e..0af83d80fb3e 100644 --- a/include/uapi/misc/ocxl.h +++ b/include/uapi/misc/ocxl.h | |||
@@ -32,6 +32,22 @@ struct ocxl_ioctl_attach { | |||
32 | __u64 reserved3; | 32 | __u64 reserved3; |
33 | }; | 33 | }; |
34 | 34 | ||
35 | struct ocxl_ioctl_metadata { | ||
36 | __u16 version; // struct version, always backwards compatible | ||
37 | |||
38 | // Version 0 fields | ||
39 | __u8 afu_version_major; | ||
40 | __u8 afu_version_minor; | ||
41 | __u32 pasid; // PASID assigned to the current context | ||
42 | |||
43 | __u64 pp_mmio_size; // Per PASID MMIO size | ||
44 | __u64 global_mmio_size; | ||
45 | |||
46 | // End version 0 fields | ||
47 | |||
48 | __u64 reserved[13]; // Total of 16*u64 | ||
49 | }; | ||
50 | |||
35 | struct ocxl_ioctl_irq_fd { | 51 | struct ocxl_ioctl_irq_fd { |
36 | __u64 irq_offset; | 52 | __u64 irq_offset; |
37 | __s32 eventfd; | 53 | __s32 eventfd; |
@@ -45,5 +61,6 @@ struct ocxl_ioctl_irq_fd { | |||
45 | #define OCXL_IOCTL_IRQ_ALLOC _IOR(OCXL_MAGIC, 0x11, __u64) | 61 | #define OCXL_IOCTL_IRQ_ALLOC _IOR(OCXL_MAGIC, 0x11, __u64) |
46 | #define OCXL_IOCTL_IRQ_FREE _IOW(OCXL_MAGIC, 0x12, __u64) | 62 | #define OCXL_IOCTL_IRQ_FREE _IOW(OCXL_MAGIC, 0x12, __u64) |
47 | #define OCXL_IOCTL_IRQ_SET_FD _IOW(OCXL_MAGIC, 0x13, struct ocxl_ioctl_irq_fd) | 63 | #define OCXL_IOCTL_IRQ_SET_FD _IOW(OCXL_MAGIC, 0x13, struct ocxl_ioctl_irq_fd) |
64 | #define OCXL_IOCTL_GET_METADATA _IOR(OCXL_MAGIC, 0x14, struct ocxl_ioctl_metadata) | ||
48 | 65 | ||
49 | #endif /* _UAPI_MISC_OCXL_H */ | 66 | #endif /* _UAPI_MISC_OCXL_H */ |
diff --git a/init/main.c b/init/main.c index a8100b954839..969eaf140ef0 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -89,6 +89,7 @@ | |||
89 | #include <linux/io.h> | 89 | #include <linux/io.h> |
90 | #include <linux/cache.h> | 90 | #include <linux/cache.h> |
91 | #include <linux/rodata_test.h> | 91 | #include <linux/rodata_test.h> |
92 | #include <linux/jump_label.h> | ||
92 | 93 | ||
93 | #include <asm/io.h> | 94 | #include <asm/io.h> |
94 | #include <asm/bugs.h> | 95 | #include <asm/bugs.h> |
@@ -1000,6 +1001,7 @@ static int __ref kernel_init(void *unused) | |||
1000 | /* need to finish all async __init code before freeing the memory */ | 1001 | /* need to finish all async __init code before freeing the memory */ |
1001 | async_synchronize_full(); | 1002 | async_synchronize_full(); |
1002 | ftrace_free_init_mem(); | 1003 | ftrace_free_init_mem(); |
1004 | jump_label_invalidate_init(); | ||
1003 | free_initmem(); | 1005 | free_initmem(); |
1004 | mark_readonly(); | 1006 | mark_readonly(); |
1005 | system_state = SYSTEM_RUNNING; | 1007 | system_state = SYSTEM_RUNNING; |
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 5fb69a85d967..c6eff108aa99 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c | |||
@@ -1356,6 +1356,13 @@ static bool is_ctx_reg(struct bpf_verifier_env *env, int regno) | |||
1356 | return reg->type == PTR_TO_CTX; | 1356 | return reg->type == PTR_TO_CTX; |
1357 | } | 1357 | } |
1358 | 1358 | ||
1359 | static bool is_pkt_reg(struct bpf_verifier_env *env, int regno) | ||
1360 | { | ||
1361 | const struct bpf_reg_state *reg = cur_regs(env) + regno; | ||
1362 | |||
1363 | return type_is_pkt_pointer(reg->type); | ||
1364 | } | ||
1365 | |||
1359 | static int check_pkt_ptr_alignment(struct bpf_verifier_env *env, | 1366 | static int check_pkt_ptr_alignment(struct bpf_verifier_env *env, |
1360 | const struct bpf_reg_state *reg, | 1367 | const struct bpf_reg_state *reg, |
1361 | int off, int size, bool strict) | 1368 | int off, int size, bool strict) |
@@ -1416,10 +1423,10 @@ static int check_generic_ptr_alignment(struct bpf_verifier_env *env, | |||
1416 | } | 1423 | } |
1417 | 1424 | ||
1418 | static int check_ptr_alignment(struct bpf_verifier_env *env, | 1425 | static int check_ptr_alignment(struct bpf_verifier_env *env, |
1419 | const struct bpf_reg_state *reg, | 1426 | const struct bpf_reg_state *reg, int off, |
1420 | int off, int size) | 1427 | int size, bool strict_alignment_once) |
1421 | { | 1428 | { |
1422 | bool strict = env->strict_alignment; | 1429 | bool strict = env->strict_alignment || strict_alignment_once; |
1423 | const char *pointer_desc = ""; | 1430 | const char *pointer_desc = ""; |
1424 | 1431 | ||
1425 | switch (reg->type) { | 1432 | switch (reg->type) { |
@@ -1576,9 +1583,9 @@ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size) | |||
1576 | * if t==write && value_regno==-1, some unknown value is stored into memory | 1583 | * if t==write && value_regno==-1, some unknown value is stored into memory |
1577 | * if t==read && value_regno==-1, don't care what we read from memory | 1584 | * if t==read && value_regno==-1, don't care what we read from memory |
1578 | */ | 1585 | */ |
1579 | static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, int off, | 1586 | static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, |
1580 | int bpf_size, enum bpf_access_type t, | 1587 | int off, int bpf_size, enum bpf_access_type t, |
1581 | int value_regno) | 1588 | int value_regno, bool strict_alignment_once) |
1582 | { | 1589 | { |
1583 | struct bpf_reg_state *regs = cur_regs(env); | 1590 | struct bpf_reg_state *regs = cur_regs(env); |
1584 | struct bpf_reg_state *reg = regs + regno; | 1591 | struct bpf_reg_state *reg = regs + regno; |
@@ -1590,7 +1597,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn | |||
1590 | return size; | 1597 | return size; |
1591 | 1598 | ||
1592 | /* alignment checks will add in reg->off themselves */ | 1599 | /* alignment checks will add in reg->off themselves */ |
1593 | err = check_ptr_alignment(env, reg, off, size); | 1600 | err = check_ptr_alignment(env, reg, off, size, strict_alignment_once); |
1594 | if (err) | 1601 | if (err) |
1595 | return err; | 1602 | return err; |
1596 | 1603 | ||
@@ -1735,21 +1742,23 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins | |||
1735 | return -EACCES; | 1742 | return -EACCES; |
1736 | } | 1743 | } |
1737 | 1744 | ||
1738 | if (is_ctx_reg(env, insn->dst_reg)) { | 1745 | if (is_ctx_reg(env, insn->dst_reg) || |
1739 | verbose(env, "BPF_XADD stores into R%d context is not allowed\n", | 1746 | is_pkt_reg(env, insn->dst_reg)) { |
1740 | insn->dst_reg); | 1747 | verbose(env, "BPF_XADD stores into R%d %s is not allowed\n", |
1748 | insn->dst_reg, is_ctx_reg(env, insn->dst_reg) ? | ||
1749 | "context" : "packet"); | ||
1741 | return -EACCES; | 1750 | return -EACCES; |
1742 | } | 1751 | } |
1743 | 1752 | ||
1744 | /* check whether atomic_add can read the memory */ | 1753 | /* check whether atomic_add can read the memory */ |
1745 | err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, | 1754 | err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, |
1746 | BPF_SIZE(insn->code), BPF_READ, -1); | 1755 | BPF_SIZE(insn->code), BPF_READ, -1, true); |
1747 | if (err) | 1756 | if (err) |
1748 | return err; | 1757 | return err; |
1749 | 1758 | ||
1750 | /* check whether atomic_add can write into the same memory */ | 1759 | /* check whether atomic_add can write into the same memory */ |
1751 | return check_mem_access(env, insn_idx, insn->dst_reg, insn->off, | 1760 | return check_mem_access(env, insn_idx, insn->dst_reg, insn->off, |
1752 | BPF_SIZE(insn->code), BPF_WRITE, -1); | 1761 | BPF_SIZE(insn->code), BPF_WRITE, -1, true); |
1753 | } | 1762 | } |
1754 | 1763 | ||
1755 | /* when register 'regno' is passed into function that will read 'access_size' | 1764 | /* when register 'regno' is passed into function that will read 'access_size' |
@@ -2388,7 +2397,8 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn | |||
2388 | * is inferred from register state. | 2397 | * is inferred from register state. |
2389 | */ | 2398 | */ |
2390 | for (i = 0; i < meta.access_size; i++) { | 2399 | for (i = 0; i < meta.access_size; i++) { |
2391 | err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, BPF_WRITE, -1); | 2400 | err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, |
2401 | BPF_WRITE, -1, false); | ||
2392 | if (err) | 2402 | if (err) |
2393 | return err; | 2403 | return err; |
2394 | } | 2404 | } |
@@ -4632,7 +4642,7 @@ static int do_check(struct bpf_verifier_env *env) | |||
4632 | */ | 4642 | */ |
4633 | err = check_mem_access(env, insn_idx, insn->src_reg, insn->off, | 4643 | err = check_mem_access(env, insn_idx, insn->src_reg, insn->off, |
4634 | BPF_SIZE(insn->code), BPF_READ, | 4644 | BPF_SIZE(insn->code), BPF_READ, |
4635 | insn->dst_reg); | 4645 | insn->dst_reg, false); |
4636 | if (err) | 4646 | if (err) |
4637 | return err; | 4647 | return err; |
4638 | 4648 | ||
@@ -4684,7 +4694,7 @@ static int do_check(struct bpf_verifier_env *env) | |||
4684 | /* check that memory (dst_reg + off) is writeable */ | 4694 | /* check that memory (dst_reg + off) is writeable */ |
4685 | err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, | 4695 | err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, |
4686 | BPF_SIZE(insn->code), BPF_WRITE, | 4696 | BPF_SIZE(insn->code), BPF_WRITE, |
4687 | insn->src_reg); | 4697 | insn->src_reg, false); |
4688 | if (err) | 4698 | if (err) |
4689 | return err; | 4699 | return err; |
4690 | 4700 | ||
@@ -4719,7 +4729,7 @@ static int do_check(struct bpf_verifier_env *env) | |||
4719 | /* check that memory (dst_reg + off) is writeable */ | 4729 | /* check that memory (dst_reg + off) is writeable */ |
4720 | err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, | 4730 | err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, |
4721 | BPF_SIZE(insn->code), BPF_WRITE, | 4731 | BPF_SIZE(insn->code), BPF_WRITE, |
4722 | -1); | 4732 | -1, false); |
4723 | if (err) | 4733 | if (err) |
4724 | return err; | 4734 | return err; |
4725 | 4735 | ||
diff --git a/kernel/compat.c b/kernel/compat.c index 3247fe761f60..3f5fa8902e7d 100644 --- a/kernel/compat.c +++ b/kernel/compat.c | |||
@@ -488,25 +488,6 @@ get_compat_sigset(sigset_t *set, const compat_sigset_t __user *compat) | |||
488 | } | 488 | } |
489 | EXPORT_SYMBOL_GPL(get_compat_sigset); | 489 | EXPORT_SYMBOL_GPL(get_compat_sigset); |
490 | 490 | ||
491 | int | ||
492 | put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set, | ||
493 | unsigned int size) | ||
494 | { | ||
495 | /* size <= sizeof(compat_sigset_t) <= sizeof(sigset_t) */ | ||
496 | #ifdef __BIG_ENDIAN | ||
497 | compat_sigset_t v; | ||
498 | switch (_NSIG_WORDS) { | ||
499 | case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3]; | ||
500 | case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2]; | ||
501 | case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1]; | ||
502 | case 1: v.sig[1] = (set->sig[0] >> 32); v.sig[0] = set->sig[0]; | ||
503 | } | ||
504 | return copy_to_user(compat, &v, size) ? -EFAULT : 0; | ||
505 | #else | ||
506 | return copy_to_user(compat, set, size) ? -EFAULT : 0; | ||
507 | #endif | ||
508 | } | ||
509 | |||
510 | #ifdef CONFIG_NUMA | 491 | #ifdef CONFIG_NUMA |
511 | COMPAT_SYSCALL_DEFINE6(move_pages, pid_t, pid, compat_ulong_t, nr_pages, | 492 | COMPAT_SYSCALL_DEFINE6(move_pages, pid_t, pid, compat_ulong_t, nr_pages, |
512 | compat_uptr_t __user *, pages32, | 493 | compat_uptr_t __user *, pages32, |
diff --git a/kernel/events/core.c b/kernel/events/core.c index 96db9ae5d5af..4b838470fac4 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -2246,7 +2246,7 @@ static void ctx_resched(struct perf_cpu_context *cpuctx, | |||
2246 | struct perf_event_context *task_ctx, | 2246 | struct perf_event_context *task_ctx, |
2247 | enum event_type_t event_type) | 2247 | enum event_type_t event_type) |
2248 | { | 2248 | { |
2249 | enum event_type_t ctx_event_type = event_type & EVENT_ALL; | 2249 | enum event_type_t ctx_event_type; |
2250 | bool cpu_event = !!(event_type & EVENT_CPU); | 2250 | bool cpu_event = !!(event_type & EVENT_CPU); |
2251 | 2251 | ||
2252 | /* | 2252 | /* |
@@ -2256,6 +2256,8 @@ static void ctx_resched(struct perf_cpu_context *cpuctx, | |||
2256 | if (event_type & EVENT_PINNED) | 2256 | if (event_type & EVENT_PINNED) |
2257 | event_type |= EVENT_FLEXIBLE; | 2257 | event_type |= EVENT_FLEXIBLE; |
2258 | 2258 | ||
2259 | ctx_event_type = event_type & EVENT_ALL; | ||
2260 | |||
2259 | perf_pmu_disable(cpuctx->ctx.pmu); | 2261 | perf_pmu_disable(cpuctx->ctx.pmu); |
2260 | if (task_ctx) | 2262 | if (task_ctx) |
2261 | task_ctx_sched_out(cpuctx, task_ctx, event_type); | 2263 | task_ctx_sched_out(cpuctx, task_ctx, event_type); |
diff --git a/kernel/extable.c b/kernel/extable.c index a17fdb63dc3e..6a5b61ebc66c 100644 --- a/kernel/extable.c +++ b/kernel/extable.c | |||
@@ -64,7 +64,7 @@ const struct exception_table_entry *search_exception_tables(unsigned long addr) | |||
64 | return e; | 64 | return e; |
65 | } | 65 | } |
66 | 66 | ||
67 | static inline int init_kernel_text(unsigned long addr) | 67 | int init_kernel_text(unsigned long addr) |
68 | { | 68 | { |
69 | if (addr >= (unsigned long)_sinittext && | 69 | if (addr >= (unsigned long)_sinittext && |
70 | addr < (unsigned long)_einittext) | 70 | addr < (unsigned long)_einittext) |
diff --git a/kernel/jump_label.c b/kernel/jump_label.c index b4517095db6a..e7214093dcd1 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c | |||
@@ -366,12 +366,16 @@ static void __jump_label_update(struct static_key *key, | |||
366 | { | 366 | { |
367 | for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) { | 367 | for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) { |
368 | /* | 368 | /* |
369 | * entry->code set to 0 invalidates module init text sections | 369 | * An entry->code of 0 indicates an entry which has been |
370 | * kernel_text_address() verifies we are not in core kernel | 370 | * disabled because it was in an init text area. |
371 | * init code, see jump_label_invalidate_module_init(). | ||
372 | */ | 371 | */ |
373 | if (entry->code && kernel_text_address(entry->code)) | 372 | if (entry->code) { |
374 | arch_jump_label_transform(entry, jump_label_type(entry)); | 373 | if (kernel_text_address(entry->code)) |
374 | arch_jump_label_transform(entry, jump_label_type(entry)); | ||
375 | else | ||
376 | WARN_ONCE(1, "can't patch jump_label at %pS", | ||
377 | (void *)(unsigned long)entry->code); | ||
378 | } | ||
375 | } | 379 | } |
376 | } | 380 | } |
377 | 381 | ||
@@ -417,6 +421,19 @@ void __init jump_label_init(void) | |||
417 | cpus_read_unlock(); | 421 | cpus_read_unlock(); |
418 | } | 422 | } |
419 | 423 | ||
424 | /* Disable any jump label entries in __init code */ | ||
425 | void __init jump_label_invalidate_init(void) | ||
426 | { | ||
427 | struct jump_entry *iter_start = __start___jump_table; | ||
428 | struct jump_entry *iter_stop = __stop___jump_table; | ||
429 | struct jump_entry *iter; | ||
430 | |||
431 | for (iter = iter_start; iter < iter_stop; iter++) { | ||
432 | if (init_kernel_text(iter->code)) | ||
433 | iter->code = 0; | ||
434 | } | ||
435 | } | ||
436 | |||
420 | #ifdef CONFIG_MODULES | 437 | #ifdef CONFIG_MODULES |
421 | 438 | ||
422 | static enum jump_label_type jump_label_init_type(struct jump_entry *entry) | 439 | static enum jump_label_type jump_label_init_type(struct jump_entry *entry) |
@@ -633,6 +650,7 @@ static void jump_label_del_module(struct module *mod) | |||
633 | } | 650 | } |
634 | } | 651 | } |
635 | 652 | ||
653 | /* Disable any jump label entries in module init code */ | ||
636 | static void jump_label_invalidate_module_init(struct module *mod) | 654 | static void jump_label_invalidate_module_init(struct module *mod) |
637 | { | 655 | { |
638 | struct jump_entry *iter_start = mod->jump_entries; | 656 | struct jump_entry *iter_start = mod->jump_entries; |
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 65cc0cb984e6..940633c63254 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c | |||
@@ -1616,11 +1616,12 @@ bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock, | |||
1616 | void __sched rt_mutex_futex_unlock(struct rt_mutex *lock) | 1616 | void __sched rt_mutex_futex_unlock(struct rt_mutex *lock) |
1617 | { | 1617 | { |
1618 | DEFINE_WAKE_Q(wake_q); | 1618 | DEFINE_WAKE_Q(wake_q); |
1619 | unsigned long flags; | ||
1619 | bool postunlock; | 1620 | bool postunlock; |
1620 | 1621 | ||
1621 | raw_spin_lock_irq(&lock->wait_lock); | 1622 | raw_spin_lock_irqsave(&lock->wait_lock, flags); |
1622 | postunlock = __rt_mutex_futex_unlock(lock, &wake_q); | 1623 | postunlock = __rt_mutex_futex_unlock(lock, &wake_q); |
1623 | raw_spin_unlock_irq(&lock->wait_lock); | 1624 | raw_spin_unlock_irqrestore(&lock->wait_lock, flags); |
1624 | 1625 | ||
1625 | if (postunlock) | 1626 | if (postunlock) |
1626 | rt_mutex_postunlock(&wake_q); | 1627 | rt_mutex_postunlock(&wake_q); |
diff --git a/kernel/memremap.c b/kernel/memremap.c index 4849be5f9b3c..4dd4274cabe2 100644 --- a/kernel/memremap.c +++ b/kernel/memremap.c | |||
@@ -275,8 +275,15 @@ static unsigned long pfn_end(struct dev_pagemap *pgmap) | |||
275 | return (res->start + resource_size(res)) >> PAGE_SHIFT; | 275 | return (res->start + resource_size(res)) >> PAGE_SHIFT; |
276 | } | 276 | } |
277 | 277 | ||
278 | static unsigned long pfn_next(unsigned long pfn) | ||
279 | { | ||
280 | if (pfn % 1024 == 0) | ||
281 | cond_resched(); | ||
282 | return pfn + 1; | ||
283 | } | ||
284 | |||
278 | #define for_each_device_pfn(pfn, map) \ | 285 | #define for_each_device_pfn(pfn, map) \ |
279 | for (pfn = pfn_first(map); pfn < pfn_end(map); pfn++) | 286 | for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn)) |
280 | 287 | ||
281 | static void devm_memremap_pages_release(void *data) | 288 | static void devm_memremap_pages_release(void *data) |
282 | { | 289 | { |
@@ -337,10 +344,10 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) | |||
337 | resource_size_t align_start, align_size, align_end; | 344 | resource_size_t align_start, align_size, align_end; |
338 | struct vmem_altmap *altmap = pgmap->altmap_valid ? | 345 | struct vmem_altmap *altmap = pgmap->altmap_valid ? |
339 | &pgmap->altmap : NULL; | 346 | &pgmap->altmap : NULL; |
347 | struct resource *res = &pgmap->res; | ||
340 | unsigned long pfn, pgoff, order; | 348 | unsigned long pfn, pgoff, order; |
341 | pgprot_t pgprot = PAGE_KERNEL; | 349 | pgprot_t pgprot = PAGE_KERNEL; |
342 | int error, nid, is_ram, i = 0; | 350 | int error, nid, is_ram; |
343 | struct resource *res = &pgmap->res; | ||
344 | 351 | ||
345 | align_start = res->start & ~(SECTION_SIZE - 1); | 352 | align_start = res->start & ~(SECTION_SIZE - 1); |
346 | align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE) | 353 | align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE) |
@@ -409,8 +416,6 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) | |||
409 | list_del(&page->lru); | 416 | list_del(&page->lru); |
410 | page->pgmap = pgmap; | 417 | page->pgmap = pgmap; |
411 | percpu_ref_get(pgmap->ref); | 418 | percpu_ref_get(pgmap->ref); |
412 | if (!(++i % 1024)) | ||
413 | cond_resched(); | ||
414 | } | 419 | } |
415 | 420 | ||
416 | devm_add_action(dev, devm_memremap_pages_release, pgmap); | 421 | devm_add_action(dev, devm_memremap_pages_release, pgmap); |
diff --git a/kernel/panic.c b/kernel/panic.c index 2cfef408fec9..4b794f1d8561 100644 --- a/kernel/panic.c +++ b/kernel/panic.c | |||
@@ -640,7 +640,7 @@ device_initcall(register_warn_debugfs); | |||
640 | */ | 640 | */ |
641 | __visible void __stack_chk_fail(void) | 641 | __visible void __stack_chk_fail(void) |
642 | { | 642 | { |
643 | panic("stack-protector: Kernel stack is corrupted in: %p\n", | 643 | panic("stack-protector: Kernel stack is corrupted in: %pB\n", |
644 | __builtin_return_address(0)); | 644 | __builtin_return_address(0)); |
645 | } | 645 | } |
646 | EXPORT_SYMBOL(__stack_chk_fail); | 646 | EXPORT_SYMBOL(__stack_chk_fail); |
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index fc1123583fa6..f274fbef821d 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c | |||
@@ -2397,7 +2397,7 @@ skip: | |||
2397 | 2397 | ||
2398 | if (console_lock_spinning_disable_and_check()) { | 2398 | if (console_lock_spinning_disable_and_check()) { |
2399 | printk_safe_exit_irqrestore(flags); | 2399 | printk_safe_exit_irqrestore(flags); |
2400 | return; | 2400 | goto out; |
2401 | } | 2401 | } |
2402 | 2402 | ||
2403 | printk_safe_exit_irqrestore(flags); | 2403 | printk_safe_exit_irqrestore(flags); |
@@ -2430,6 +2430,7 @@ skip: | |||
2430 | if (retry && console_trylock()) | 2430 | if (retry && console_trylock()) |
2431 | goto again; | 2431 | goto again; |
2432 | 2432 | ||
2433 | out: | ||
2433 | if (wake_klogd) | 2434 | if (wake_klogd) |
2434 | wake_up_klogd(); | 2435 | wake_up_klogd(); |
2435 | } | 2436 | } |
diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 48150ab42de9..4a4fd567fb26 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c | |||
@@ -1894,6 +1894,12 @@ int timers_dead_cpu(unsigned int cpu) | |||
1894 | raw_spin_lock_irq(&new_base->lock); | 1894 | raw_spin_lock_irq(&new_base->lock); |
1895 | raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); | 1895 | raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); |
1896 | 1896 | ||
1897 | /* | ||
1898 | * The current CPUs base clock might be stale. Update it | ||
1899 | * before moving the timers over. | ||
1900 | */ | ||
1901 | forward_timer_base(new_base); | ||
1902 | |||
1897 | BUG_ON(old_base->running_timer); | 1903 | BUG_ON(old_base->running_timer); |
1898 | 1904 | ||
1899 | for (i = 0; i < WHEEL_SIZE; i++) | 1905 | for (i = 0; i < WHEEL_SIZE; i++) |
diff --git a/lib/btree.c b/lib/btree.c index f93a945274af..590facba2c50 100644 --- a/lib/btree.c +++ b/lib/btree.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * As should be obvious for Linux kernel code, license is GPLv2 | 4 | * As should be obvious for Linux kernel code, license is GPLv2 |
5 | * | 5 | * |
6 | * Copyright (c) 2007-2008 Joern Engel <joern@logfs.org> | 6 | * Copyright (c) 2007-2008 Joern Engel <joern@purestorage.com> |
7 | * Bits and pieces stolen from Peter Zijlstra's code, which is | 7 | * Bits and pieces stolen from Peter Zijlstra's code, which is |
8 | * Copyright 2007, Red Hat Inc. Peter Zijlstra | 8 | * Copyright 2007, Red Hat Inc. Peter Zijlstra |
9 | * GPLv2 | 9 | * GPLv2 |
@@ -76,6 +76,8 @@ struct btree_geo btree_geo128 = { | |||
76 | }; | 76 | }; |
77 | EXPORT_SYMBOL_GPL(btree_geo128); | 77 | EXPORT_SYMBOL_GPL(btree_geo128); |
78 | 78 | ||
79 | #define MAX_KEYLEN (2 * LONG_PER_U64) | ||
80 | |||
79 | static struct kmem_cache *btree_cachep; | 81 | static struct kmem_cache *btree_cachep; |
80 | 82 | ||
81 | void *btree_alloc(gfp_t gfp_mask, void *pool_data) | 83 | void *btree_alloc(gfp_t gfp_mask, void *pool_data) |
@@ -313,7 +315,7 @@ void *btree_get_prev(struct btree_head *head, struct btree_geo *geo, | |||
313 | { | 315 | { |
314 | int i, height; | 316 | int i, height; |
315 | unsigned long *node, *oldnode; | 317 | unsigned long *node, *oldnode; |
316 | unsigned long *retry_key = NULL, key[geo->keylen]; | 318 | unsigned long *retry_key = NULL, key[MAX_KEYLEN]; |
317 | 319 | ||
318 | if (keyzero(geo, __key)) | 320 | if (keyzero(geo, __key)) |
319 | return NULL; | 321 | return NULL; |
@@ -639,8 +641,8 @@ EXPORT_SYMBOL_GPL(btree_remove); | |||
639 | int btree_merge(struct btree_head *target, struct btree_head *victim, | 641 | int btree_merge(struct btree_head *target, struct btree_head *victim, |
640 | struct btree_geo *geo, gfp_t gfp) | 642 | struct btree_geo *geo, gfp_t gfp) |
641 | { | 643 | { |
642 | unsigned long key[geo->keylen]; | 644 | unsigned long key[MAX_KEYLEN]; |
643 | unsigned long dup[geo->keylen]; | 645 | unsigned long dup[MAX_KEYLEN]; |
644 | void *val; | 646 | void *val; |
645 | int err; | 647 | int err; |
646 | 648 | ||
@@ -150,6 +150,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs) | |||
150 | return BUG_TRAP_TYPE_NONE; | 150 | return BUG_TRAP_TYPE_NONE; |
151 | 151 | ||
152 | bug = find_bug(bugaddr); | 152 | bug = find_bug(bugaddr); |
153 | if (!bug) | ||
154 | return BUG_TRAP_TYPE_NONE; | ||
153 | 155 | ||
154 | file = NULL; | 156 | file = NULL; |
155 | line = 0; | 157 | line = 0; |
@@ -191,7 +193,7 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs) | |||
191 | if (file) | 193 | if (file) |
192 | pr_crit("kernel BUG at %s:%u!\n", file, line); | 194 | pr_crit("kernel BUG at %s:%u!\n", file, line); |
193 | else | 195 | else |
194 | pr_crit("Kernel BUG at %p [verbose debug info unavailable]\n", | 196 | pr_crit("Kernel BUG at %pB [verbose debug info unavailable]\n", |
195 | (void *)bugaddr); | 197 | (void *)bugaddr); |
196 | 198 | ||
197 | return BUG_TRAP_TYPE_BUG; | 199 | return BUG_TRAP_TYPE_BUG; |
diff --git a/lib/dma-debug.c b/lib/dma-debug.c index 1b34d210452c..7f5cdc1e6b29 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c | |||
@@ -1491,12 +1491,12 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size, | |||
1491 | if (unlikely(virt == NULL)) | 1491 | if (unlikely(virt == NULL)) |
1492 | return; | 1492 | return; |
1493 | 1493 | ||
1494 | entry = dma_entry_alloc(); | 1494 | /* handle vmalloc and linear addresses */ |
1495 | if (!entry) | 1495 | if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt)) |
1496 | return; | 1496 | return; |
1497 | 1497 | ||
1498 | /* handle vmalloc and linear addresses */ | 1498 | entry = dma_entry_alloc(); |
1499 | if (!is_vmalloc_addr(virt) && !virt_to_page(virt)) | 1499 | if (!entry) |
1500 | return; | 1500 | return; |
1501 | 1501 | ||
1502 | entry->type = dma_debug_coherent; | 1502 | entry->type = dma_debug_coherent; |
@@ -1528,7 +1528,7 @@ void debug_dma_free_coherent(struct device *dev, size_t size, | |||
1528 | }; | 1528 | }; |
1529 | 1529 | ||
1530 | /* handle vmalloc and linear addresses */ | 1530 | /* handle vmalloc and linear addresses */ |
1531 | if (!is_vmalloc_addr(virt) && !virt_to_page(virt)) | 1531 | if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt)) |
1532 | return; | 1532 | return; |
1533 | 1533 | ||
1534 | if (is_vmalloc_addr(virt)) | 1534 | if (is_vmalloc_addr(virt)) |
@@ -36,8 +36,8 @@ int idr_alloc_u32(struct idr *idr, void *ptr, u32 *nextid, | |||
36 | { | 36 | { |
37 | struct radix_tree_iter iter; | 37 | struct radix_tree_iter iter; |
38 | void __rcu **slot; | 38 | void __rcu **slot; |
39 | int base = idr->idr_base; | 39 | unsigned int base = idr->idr_base; |
40 | int id = *nextid; | 40 | unsigned int id = *nextid; |
41 | 41 | ||
42 | if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr))) | 42 | if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr))) |
43 | return -EINVAL; | 43 | return -EINVAL; |
@@ -204,10 +204,11 @@ int idr_for_each(const struct idr *idr, | |||
204 | 204 | ||
205 | radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) { | 205 | radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) { |
206 | int ret; | 206 | int ret; |
207 | unsigned long id = iter.index + base; | ||
207 | 208 | ||
208 | if (WARN_ON_ONCE(iter.index > INT_MAX)) | 209 | if (WARN_ON_ONCE(id > INT_MAX)) |
209 | break; | 210 | break; |
210 | ret = fn(iter.index + base, rcu_dereference_raw(*slot), data); | 211 | ret = fn(id, rcu_dereference_raw(*slot), data); |
211 | if (ret) | 212 | if (ret) |
212 | return ret; | 213 | return ret; |
213 | } | 214 | } |
@@ -230,8 +231,8 @@ void *idr_get_next(struct idr *idr, int *nextid) | |||
230 | { | 231 | { |
231 | struct radix_tree_iter iter; | 232 | struct radix_tree_iter iter; |
232 | void __rcu **slot; | 233 | void __rcu **slot; |
233 | int base = idr->idr_base; | 234 | unsigned long base = idr->idr_base; |
234 | int id = *nextid; | 235 | unsigned long id = *nextid; |
235 | 236 | ||
236 | id = (id < base) ? 0 : id - base; | 237 | id = (id < base) ? 0 : id - base; |
237 | slot = radix_tree_iter_find(&idr->idr_rt, &iter, id); | 238 | slot = radix_tree_iter_find(&idr->idr_rt, &iter, id); |
diff --git a/lib/test_bpf.c b/lib/test_bpf.c index b4e22345963f..2efb213716fa 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c | |||
@@ -24,10 +24,11 @@ | |||
24 | #include <linux/if_vlan.h> | 24 | #include <linux/if_vlan.h> |
25 | #include <linux/random.h> | 25 | #include <linux/random.h> |
26 | #include <linux/highmem.h> | 26 | #include <linux/highmem.h> |
27 | #include <linux/sched.h> | ||
27 | 28 | ||
28 | /* General test specific settings */ | 29 | /* General test specific settings */ |
29 | #define MAX_SUBTESTS 3 | 30 | #define MAX_SUBTESTS 3 |
30 | #define MAX_TESTRUNS 10000 | 31 | #define MAX_TESTRUNS 1000 |
31 | #define MAX_DATA 128 | 32 | #define MAX_DATA 128 |
32 | #define MAX_INSNS 512 | 33 | #define MAX_INSNS 512 |
33 | #define MAX_K 0xffffFFFF | 34 | #define MAX_K 0xffffFFFF |
@@ -6582,6 +6583,7 @@ static __init int test_bpf(void) | |||
6582 | struct bpf_prog *fp; | 6583 | struct bpf_prog *fp; |
6583 | int err; | 6584 | int err; |
6584 | 6585 | ||
6586 | cond_resched(); | ||
6585 | if (exclude_test(i)) | 6587 | if (exclude_test(i)) |
6586 | continue; | 6588 | continue; |
6587 | 6589 | ||
diff --git a/lib/test_kmod.c b/lib/test_kmod.c index e372b97eee13..0e5b7a61460b 100644 --- a/lib/test_kmod.c +++ b/lib/test_kmod.c | |||
@@ -1141,7 +1141,7 @@ static struct kmod_test_device *register_test_dev_kmod(void) | |||
1141 | mutex_lock(®_dev_mutex); | 1141 | mutex_lock(®_dev_mutex); |
1142 | 1142 | ||
1143 | /* int should suffice for number of devices, test for wrap */ | 1143 | /* int should suffice for number of devices, test for wrap */ |
1144 | if (unlikely(num_test_devs + 1) < 0) { | 1144 | if (num_test_devs + 1 == INT_MAX) { |
1145 | pr_err("reached limit of number of test devices\n"); | 1145 | pr_err("reached limit of number of test devices\n"); |
1146 | goto out; | 1146 | goto out; |
1147 | } | 1147 | } |
@@ -516,7 +516,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma, | |||
516 | } | 516 | } |
517 | 517 | ||
518 | if (ret & VM_FAULT_RETRY) { | 518 | if (ret & VM_FAULT_RETRY) { |
519 | if (nonblocking) | 519 | if (nonblocking && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT)) |
520 | *nonblocking = 0; | 520 | *nonblocking = 0; |
521 | return -EBUSY; | 521 | return -EBUSY; |
522 | } | 522 | } |
@@ -890,7 +890,10 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk, | |||
890 | break; | 890 | break; |
891 | } | 891 | } |
892 | if (*locked) { | 892 | if (*locked) { |
893 | /* VM_FAULT_RETRY didn't trigger */ | 893 | /* |
894 | * VM_FAULT_RETRY didn't trigger or it was a | ||
895 | * FOLL_NOWAIT. | ||
896 | */ | ||
894 | if (!pages_done) | 897 | if (!pages_done) |
895 | pages_done = ret; | 898 | pages_done = ret; |
896 | break; | 899 | break; |
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 7c204e3d132b..a963f2034dfc 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -1583,7 +1583,7 @@ static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask, | |||
1583 | page = NULL; | 1583 | page = NULL; |
1584 | } else { | 1584 | } else { |
1585 | h->surplus_huge_pages++; | 1585 | h->surplus_huge_pages++; |
1586 | h->nr_huge_pages_node[page_to_nid(page)]++; | 1586 | h->surplus_huge_pages_node[page_to_nid(page)]++; |
1587 | } | 1587 | } |
1588 | 1588 | ||
1589 | out_unlock: | 1589 | out_unlock: |
diff --git a/mm/memblock.c b/mm/memblock.c index 5a9ca2a1751b..b6ba6b7adadc 100644 --- a/mm/memblock.c +++ b/mm/memblock.c | |||
@@ -1107,7 +1107,7 @@ unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn, | |||
1107 | struct memblock_type *type = &memblock.memory; | 1107 | struct memblock_type *type = &memblock.memory; |
1108 | unsigned int right = type->cnt; | 1108 | unsigned int right = type->cnt; |
1109 | unsigned int mid, left = 0; | 1109 | unsigned int mid, left = 0; |
1110 | phys_addr_t addr = PFN_PHYS(pfn + 1); | 1110 | phys_addr_t addr = PFN_PHYS(++pfn); |
1111 | 1111 | ||
1112 | do { | 1112 | do { |
1113 | mid = (right + left) / 2; | 1113 | mid = (right + left) / 2; |
@@ -1118,15 +1118,15 @@ unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn, | |||
1118 | type->regions[mid].size)) | 1118 | type->regions[mid].size)) |
1119 | left = mid + 1; | 1119 | left = mid + 1; |
1120 | else { | 1120 | else { |
1121 | /* addr is within the region, so pfn + 1 is valid */ | 1121 | /* addr is within the region, so pfn is valid */ |
1122 | return min(pfn + 1, max_pfn); | 1122 | return pfn; |
1123 | } | 1123 | } |
1124 | } while (left < right); | 1124 | } while (left < right); |
1125 | 1125 | ||
1126 | if (right == type->cnt) | 1126 | if (right == type->cnt) |
1127 | return max_pfn; | 1127 | return -1UL; |
1128 | else | 1128 | else |
1129 | return min(PHYS_PFN(type->regions[right].base), max_pfn); | 1129 | return PHYS_PFN(type->regions[right].base); |
1130 | } | 1130 | } |
1131 | 1131 | ||
1132 | /** | 1132 | /** |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index cb416723538f..635d7dd29d7f 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -1910,7 +1910,9 @@ static int move_freepages(struct zone *zone, | |||
1910 | * Remove at a later date when no bug reports exist related to | 1910 | * Remove at a later date when no bug reports exist related to |
1911 | * grouping pages by mobility | 1911 | * grouping pages by mobility |
1912 | */ | 1912 | */ |
1913 | VM_BUG_ON(page_zone(start_page) != page_zone(end_page)); | 1913 | VM_BUG_ON(pfn_valid(page_to_pfn(start_page)) && |
1914 | pfn_valid(page_to_pfn(end_page)) && | ||
1915 | page_zone(start_page) != page_zone(end_page)); | ||
1914 | #endif | 1916 | #endif |
1915 | 1917 | ||
1916 | if (num_movable) | 1918 | if (num_movable) |
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 79e326383726..99abeadf416e 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c | |||
@@ -157,7 +157,7 @@ static void batadv_iv_ogm_orig_free(struct batadv_orig_node *orig_node) | |||
157 | * Return: 0 on success, a negative error code otherwise. | 157 | * Return: 0 on success, a negative error code otherwise. |
158 | */ | 158 | */ |
159 | static int batadv_iv_ogm_orig_add_if(struct batadv_orig_node *orig_node, | 159 | static int batadv_iv_ogm_orig_add_if(struct batadv_orig_node *orig_node, |
160 | int max_if_num) | 160 | unsigned int max_if_num) |
161 | { | 161 | { |
162 | void *data_ptr; | 162 | void *data_ptr; |
163 | size_t old_size; | 163 | size_t old_size; |
@@ -201,7 +201,8 @@ unlock: | |||
201 | */ | 201 | */ |
202 | static void | 202 | static void |
203 | batadv_iv_ogm_drop_bcast_own_entry(struct batadv_orig_node *orig_node, | 203 | batadv_iv_ogm_drop_bcast_own_entry(struct batadv_orig_node *orig_node, |
204 | int max_if_num, int del_if_num) | 204 | unsigned int max_if_num, |
205 | unsigned int del_if_num) | ||
205 | { | 206 | { |
206 | size_t chunk_size; | 207 | size_t chunk_size; |
207 | size_t if_offset; | 208 | size_t if_offset; |
@@ -239,7 +240,8 @@ batadv_iv_ogm_drop_bcast_own_entry(struct batadv_orig_node *orig_node, | |||
239 | */ | 240 | */ |
240 | static void | 241 | static void |
241 | batadv_iv_ogm_drop_bcast_own_sum_entry(struct batadv_orig_node *orig_node, | 242 | batadv_iv_ogm_drop_bcast_own_sum_entry(struct batadv_orig_node *orig_node, |
242 | int max_if_num, int del_if_num) | 243 | unsigned int max_if_num, |
244 | unsigned int del_if_num) | ||
243 | { | 245 | { |
244 | size_t if_offset; | 246 | size_t if_offset; |
245 | void *data_ptr; | 247 | void *data_ptr; |
@@ -276,7 +278,8 @@ batadv_iv_ogm_drop_bcast_own_sum_entry(struct batadv_orig_node *orig_node, | |||
276 | * Return: 0 on success, a negative error code otherwise. | 278 | * Return: 0 on success, a negative error code otherwise. |
277 | */ | 279 | */ |
278 | static int batadv_iv_ogm_orig_del_if(struct batadv_orig_node *orig_node, | 280 | static int batadv_iv_ogm_orig_del_if(struct batadv_orig_node *orig_node, |
279 | int max_if_num, int del_if_num) | 281 | unsigned int max_if_num, |
282 | unsigned int del_if_num) | ||
280 | { | 283 | { |
281 | spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock); | 284 | spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock); |
282 | 285 | ||
@@ -311,7 +314,8 @@ static struct batadv_orig_node * | |||
311 | batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const u8 *addr) | 314 | batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const u8 *addr) |
312 | { | 315 | { |
313 | struct batadv_orig_node *orig_node; | 316 | struct batadv_orig_node *orig_node; |
314 | int size, hash_added; | 317 | int hash_added; |
318 | size_t size; | ||
315 | 319 | ||
316 | orig_node = batadv_orig_hash_find(bat_priv, addr); | 320 | orig_node = batadv_orig_hash_find(bat_priv, addr); |
317 | if (orig_node) | 321 | if (orig_node) |
@@ -893,7 +897,7 @@ batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface) | |||
893 | u32 i; | 897 | u32 i; |
894 | size_t word_index; | 898 | size_t word_index; |
895 | u8 *w; | 899 | u8 *w; |
896 | int if_num; | 900 | unsigned int if_num; |
897 | 901 | ||
898 | for (i = 0; i < hash->size; i++) { | 902 | for (i = 0; i < hash->size; i++) { |
899 | head = &hash->table[i]; | 903 | head = &hash->table[i]; |
@@ -1023,7 +1027,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv, | |||
1023 | struct batadv_neigh_node *tmp_neigh_node = NULL; | 1027 | struct batadv_neigh_node *tmp_neigh_node = NULL; |
1024 | struct batadv_neigh_node *router = NULL; | 1028 | struct batadv_neigh_node *router = NULL; |
1025 | struct batadv_orig_node *orig_node_tmp; | 1029 | struct batadv_orig_node *orig_node_tmp; |
1026 | int if_num; | 1030 | unsigned int if_num; |
1027 | u8 sum_orig, sum_neigh; | 1031 | u8 sum_orig, sum_neigh; |
1028 | u8 *neigh_addr; | 1032 | u8 *neigh_addr; |
1029 | u8 tq_avg; | 1033 | u8 tq_avg; |
@@ -1182,7 +1186,7 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node, | |||
1182 | u8 total_count; | 1186 | u8 total_count; |
1183 | u8 orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own; | 1187 | u8 orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own; |
1184 | unsigned int neigh_rq_inv_cube, neigh_rq_max_cube; | 1188 | unsigned int neigh_rq_inv_cube, neigh_rq_max_cube; |
1185 | int if_num; | 1189 | unsigned int if_num; |
1186 | unsigned int tq_asym_penalty, inv_asym_penalty; | 1190 | unsigned int tq_asym_penalty, inv_asym_penalty; |
1187 | unsigned int combined_tq; | 1191 | unsigned int combined_tq; |
1188 | unsigned int tq_iface_penalty; | 1192 | unsigned int tq_iface_penalty; |
@@ -1702,9 +1706,9 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset, | |||
1702 | 1706 | ||
1703 | if (is_my_orig) { | 1707 | if (is_my_orig) { |
1704 | unsigned long *word; | 1708 | unsigned long *word; |
1705 | int offset; | 1709 | size_t offset; |
1706 | s32 bit_pos; | 1710 | s32 bit_pos; |
1707 | s16 if_num; | 1711 | unsigned int if_num; |
1708 | u8 *weight; | 1712 | u8 *weight; |
1709 | 1713 | ||
1710 | orig_neigh_node = batadv_iv_ogm_orig_get(bat_priv, | 1714 | orig_neigh_node = batadv_iv_ogm_orig_get(bat_priv, |
@@ -2729,7 +2733,7 @@ static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq, | |||
2729 | struct batadv_neigh_ifinfo *router_ifinfo = NULL; | 2733 | struct batadv_neigh_ifinfo *router_ifinfo = NULL; |
2730 | struct batadv_neigh_node *router; | 2734 | struct batadv_neigh_node *router; |
2731 | struct batadv_gw_node *curr_gw; | 2735 | struct batadv_gw_node *curr_gw; |
2732 | int ret = -EINVAL; | 2736 | int ret = 0; |
2733 | void *hdr; | 2737 | void *hdr; |
2734 | 2738 | ||
2735 | router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT); | 2739 | router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT); |
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c index 27e165ac9302..c74f81341dab 100644 --- a/net/batman-adv/bat_v.c +++ b/net/batman-adv/bat_v.c | |||
@@ -928,7 +928,7 @@ static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq, | |||
928 | struct batadv_neigh_ifinfo *router_ifinfo = NULL; | 928 | struct batadv_neigh_ifinfo *router_ifinfo = NULL; |
929 | struct batadv_neigh_node *router; | 929 | struct batadv_neigh_node *router; |
930 | struct batadv_gw_node *curr_gw; | 930 | struct batadv_gw_node *curr_gw; |
931 | int ret = -EINVAL; | 931 | int ret = 0; |
932 | void *hdr; | 932 | void *hdr; |
933 | 933 | ||
934 | router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT); | 934 | router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT); |
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index fad47853ad3c..b1a08374088b 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c | |||
@@ -2161,22 +2161,25 @@ batadv_bla_claim_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq, | |||
2161 | { | 2161 | { |
2162 | struct batadv_bla_claim *claim; | 2162 | struct batadv_bla_claim *claim; |
2163 | int idx = 0; | 2163 | int idx = 0; |
2164 | int ret = 0; | ||
2164 | 2165 | ||
2165 | rcu_read_lock(); | 2166 | rcu_read_lock(); |
2166 | hlist_for_each_entry_rcu(claim, head, hash_entry) { | 2167 | hlist_for_each_entry_rcu(claim, head, hash_entry) { |
2167 | if (idx++ < *idx_skip) | 2168 | if (idx++ < *idx_skip) |
2168 | continue; | 2169 | continue; |
2169 | if (batadv_bla_claim_dump_entry(msg, portid, seq, | 2170 | |
2170 | primary_if, claim)) { | 2171 | ret = batadv_bla_claim_dump_entry(msg, portid, seq, |
2172 | primary_if, claim); | ||
2173 | if (ret) { | ||
2171 | *idx_skip = idx - 1; | 2174 | *idx_skip = idx - 1; |
2172 | goto unlock; | 2175 | goto unlock; |
2173 | } | 2176 | } |
2174 | } | 2177 | } |
2175 | 2178 | ||
2176 | *idx_skip = idx; | 2179 | *idx_skip = 0; |
2177 | unlock: | 2180 | unlock: |
2178 | rcu_read_unlock(); | 2181 | rcu_read_unlock(); |
2179 | return 0; | 2182 | return ret; |
2180 | } | 2183 | } |
2181 | 2184 | ||
2182 | /** | 2185 | /** |
@@ -2391,22 +2394,25 @@ batadv_bla_backbone_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq, | |||
2391 | { | 2394 | { |
2392 | struct batadv_bla_backbone_gw *backbone_gw; | 2395 | struct batadv_bla_backbone_gw *backbone_gw; |
2393 | int idx = 0; | 2396 | int idx = 0; |
2397 | int ret = 0; | ||
2394 | 2398 | ||
2395 | rcu_read_lock(); | 2399 | rcu_read_lock(); |
2396 | hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) { | 2400 | hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) { |
2397 | if (idx++ < *idx_skip) | 2401 | if (idx++ < *idx_skip) |
2398 | continue; | 2402 | continue; |
2399 | if (batadv_bla_backbone_dump_entry(msg, portid, seq, | 2403 | |
2400 | primary_if, backbone_gw)) { | 2404 | ret = batadv_bla_backbone_dump_entry(msg, portid, seq, |
2405 | primary_if, backbone_gw); | ||
2406 | if (ret) { | ||
2401 | *idx_skip = idx - 1; | 2407 | *idx_skip = idx - 1; |
2402 | goto unlock; | 2408 | goto unlock; |
2403 | } | 2409 | } |
2404 | } | 2410 | } |
2405 | 2411 | ||
2406 | *idx_skip = idx; | 2412 | *idx_skip = 0; |
2407 | unlock: | 2413 | unlock: |
2408 | rcu_read_unlock(); | 2414 | rcu_read_unlock(); |
2409 | return 0; | 2415 | return ret; |
2410 | } | 2416 | } |
2411 | 2417 | ||
2412 | /** | 2418 | /** |
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c index 22dde42fd80e..5afe641ee4b0 100644 --- a/net/batman-adv/fragmentation.c +++ b/net/batman-adv/fragmentation.c | |||
@@ -288,7 +288,8 @@ batadv_frag_merge_packets(struct hlist_head *chain) | |||
288 | /* Move the existing MAC header to just before the payload. (Override | 288 | /* Move the existing MAC header to just before the payload. (Override |
289 | * the fragment header.) | 289 | * the fragment header.) |
290 | */ | 290 | */ |
291 | skb_pull_rcsum(skb_out, hdr_size); | 291 | skb_pull(skb_out, hdr_size); |
292 | skb_out->ip_summed = CHECKSUM_NONE; | ||
292 | memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN); | 293 | memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN); |
293 | skb_set_mac_header(skb_out, -ETH_HLEN); | 294 | skb_set_mac_header(skb_out, -ETH_HLEN); |
294 | skb_reset_network_header(skb_out); | 295 | skb_reset_network_header(skb_out); |
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 5f186bff284a..68b54a39c51d 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c | |||
@@ -763,6 +763,11 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface, | |||
763 | hard_iface->soft_iface = soft_iface; | 763 | hard_iface->soft_iface = soft_iface; |
764 | bat_priv = netdev_priv(hard_iface->soft_iface); | 764 | bat_priv = netdev_priv(hard_iface->soft_iface); |
765 | 765 | ||
766 | if (bat_priv->num_ifaces >= UINT_MAX) { | ||
767 | ret = -ENOSPC; | ||
768 | goto err_dev; | ||
769 | } | ||
770 | |||
766 | ret = netdev_master_upper_dev_link(hard_iface->net_dev, | 771 | ret = netdev_master_upper_dev_link(hard_iface->net_dev, |
767 | soft_iface, NULL, NULL, NULL); | 772 | soft_iface, NULL, NULL, NULL); |
768 | if (ret) | 773 | if (ret) |
@@ -876,7 +881,7 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface, | |||
876 | batadv_hardif_recalc_extra_skbroom(hard_iface->soft_iface); | 881 | batadv_hardif_recalc_extra_skbroom(hard_iface->soft_iface); |
877 | 882 | ||
878 | /* nobody uses this interface anymore */ | 883 | /* nobody uses this interface anymore */ |
879 | if (!bat_priv->num_ifaces) { | 884 | if (bat_priv->num_ifaces == 0) { |
880 | batadv_gw_check_client_stop(bat_priv); | 885 | batadv_gw_check_client_stop(bat_priv); |
881 | 886 | ||
882 | if (autodel == BATADV_IF_CLEANUP_AUTO) | 887 | if (autodel == BATADV_IF_CLEANUP_AUTO) |
@@ -912,7 +917,7 @@ batadv_hardif_add_interface(struct net_device *net_dev) | |||
912 | if (ret) | 917 | if (ret) |
913 | goto free_if; | 918 | goto free_if; |
914 | 919 | ||
915 | hard_iface->if_num = -1; | 920 | hard_iface->if_num = 0; |
916 | hard_iface->net_dev = net_dev; | 921 | hard_iface->net_dev = net_dev; |
917 | hard_iface->soft_iface = NULL; | 922 | hard_iface->soft_iface = NULL; |
918 | hard_iface->if_status = BATADV_IF_NOT_IN_USE; | 923 | hard_iface->if_status = BATADV_IF_NOT_IN_USE; |
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index 58a7d9274435..74782426bb77 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c | |||
@@ -1569,7 +1569,7 @@ int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb) | |||
1569 | * Return: 0 on success or negative error number in case of failure | 1569 | * Return: 0 on success or negative error number in case of failure |
1570 | */ | 1570 | */ |
1571 | int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface, | 1571 | int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface, |
1572 | int max_if_num) | 1572 | unsigned int max_if_num) |
1573 | { | 1573 | { |
1574 | struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); | 1574 | struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); |
1575 | struct batadv_algo_ops *bao = bat_priv->algo_ops; | 1575 | struct batadv_algo_ops *bao = bat_priv->algo_ops; |
@@ -1611,7 +1611,7 @@ err: | |||
1611 | * Return: 0 on success or negative error number in case of failure | 1611 | * Return: 0 on success or negative error number in case of failure |
1612 | */ | 1612 | */ |
1613 | int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface, | 1613 | int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface, |
1614 | int max_if_num) | 1614 | unsigned int max_if_num) |
1615 | { | 1615 | { |
1616 | struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); | 1616 | struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); |
1617 | struct batadv_hashtable *hash = bat_priv->orig_hash; | 1617 | struct batadv_hashtable *hash = bat_priv->orig_hash; |
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h index 8e543a3cdc6c..15d896b2de6f 100644 --- a/net/batman-adv/originator.h +++ b/net/batman-adv/originator.h | |||
@@ -73,9 +73,9 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset); | |||
73 | int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb); | 73 | int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb); |
74 | int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset); | 74 | int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset); |
75 | int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface, | 75 | int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface, |
76 | int max_if_num); | 76 | unsigned int max_if_num); |
77 | int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface, | 77 | int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface, |
78 | int max_if_num); | 78 | unsigned int max_if_num); |
79 | struct batadv_orig_node_vlan * | 79 | struct batadv_orig_node_vlan * |
80 | batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node, | 80 | batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node, |
81 | unsigned short vid); | 81 | unsigned short vid); |
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 900c5ce21cd4..367a81fb785f 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c | |||
@@ -459,13 +459,7 @@ void batadv_interface_rx(struct net_device *soft_iface, | |||
459 | 459 | ||
460 | /* skb->dev & skb->pkt_type are set here */ | 460 | /* skb->dev & skb->pkt_type are set here */ |
461 | skb->protocol = eth_type_trans(skb, soft_iface); | 461 | skb->protocol = eth_type_trans(skb, soft_iface); |
462 | 462 | skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); | |
463 | /* should not be necessary anymore as we use skb_pull_rcsum() | ||
464 | * TODO: please verify this and remove this TODO | ||
465 | * -- Dec 21st 2009, Simon Wunderlich | ||
466 | */ | ||
467 | |||
468 | /* skb->ip_summed = CHECKSUM_UNNECESSARY; */ | ||
469 | 463 | ||
470 | batadv_inc_counter(bat_priv, BATADV_CNT_RX); | 464 | batadv_inc_counter(bat_priv, BATADV_CNT_RX); |
471 | batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES, | 465 | batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES, |
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index bb1578410e0c..a5aa6d61f4e2 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h | |||
@@ -167,7 +167,7 @@ struct batadv_hard_iface { | |||
167 | struct list_head list; | 167 | struct list_head list; |
168 | 168 | ||
169 | /** @if_num: identificator of the interface */ | 169 | /** @if_num: identificator of the interface */ |
170 | s16 if_num; | 170 | unsigned int if_num; |
171 | 171 | ||
172 | /** @if_status: status of the interface for batman-adv */ | 172 | /** @if_status: status of the interface for batman-adv */ |
173 | char if_status; | 173 | char if_status; |
@@ -1596,7 +1596,7 @@ struct batadv_priv { | |||
1596 | atomic_t batman_queue_left; | 1596 | atomic_t batman_queue_left; |
1597 | 1597 | ||
1598 | /** @num_ifaces: number of interfaces assigned to this mesh interface */ | 1598 | /** @num_ifaces: number of interfaces assigned to this mesh interface */ |
1599 | char num_ifaces; | 1599 | unsigned int num_ifaces; |
1600 | 1600 | ||
1601 | /** @mesh_obj: kobject for sysfs mesh subdirectory */ | 1601 | /** @mesh_obj: kobject for sysfs mesh subdirectory */ |
1602 | struct kobject *mesh_obj; | 1602 | struct kobject *mesh_obj; |
@@ -2186,15 +2186,16 @@ struct batadv_algo_orig_ops { | |||
2186 | * orig_node due to a new hard-interface being added into the mesh | 2186 | * orig_node due to a new hard-interface being added into the mesh |
2187 | * (optional) | 2187 | * (optional) |
2188 | */ | 2188 | */ |
2189 | int (*add_if)(struct batadv_orig_node *orig_node, int max_if_num); | 2189 | int (*add_if)(struct batadv_orig_node *orig_node, |
2190 | unsigned int max_if_num); | ||
2190 | 2191 | ||
2191 | /** | 2192 | /** |
2192 | * @del_if: ask the routing algorithm to apply the needed changes to the | 2193 | * @del_if: ask the routing algorithm to apply the needed changes to the |
2193 | * orig_node due to an hard-interface being removed from the mesh | 2194 | * orig_node due to an hard-interface being removed from the mesh |
2194 | * (optional) | 2195 | * (optional) |
2195 | */ | 2196 | */ |
2196 | int (*del_if)(struct batadv_orig_node *orig_node, int max_if_num, | 2197 | int (*del_if)(struct batadv_orig_node *orig_node, |
2197 | int del_if_num); | 2198 | unsigned int max_if_num, unsigned int del_if_num); |
2198 | 2199 | ||
2199 | #ifdef CONFIG_BATMAN_ADV_DEBUGFS | 2200 | #ifdef CONFIG_BATMAN_ADV_DEBUGFS |
2200 | /** @print: print the originator table (optional) */ | 2201 | /** @print: print the originator table (optional) */ |
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index 27f1d4f2114a..9b16eaf33819 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c | |||
@@ -214,7 +214,7 @@ static int br_validate_ipv4(struct net *net, struct sk_buff *skb) | |||
214 | 214 | ||
215 | iph = ip_hdr(skb); | 215 | iph = ip_hdr(skb); |
216 | if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl))) | 216 | if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl))) |
217 | goto inhdr_error; | 217 | goto csum_error; |
218 | 218 | ||
219 | len = ntohs(iph->tot_len); | 219 | len = ntohs(iph->tot_len); |
220 | if (skb->len < len) { | 220 | if (skb->len < len) { |
@@ -236,6 +236,8 @@ static int br_validate_ipv4(struct net *net, struct sk_buff *skb) | |||
236 | */ | 236 | */ |
237 | return 0; | 237 | return 0; |
238 | 238 | ||
239 | csum_error: | ||
240 | __IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS); | ||
239 | inhdr_error: | 241 | inhdr_error: |
240 | __IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS); | 242 | __IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS); |
241 | drop: | 243 | drop: |
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index 51935270c651..9896f4975353 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c | |||
@@ -168,6 +168,8 @@ static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid | |||
168 | masterv = br_vlan_find(vg, vid); | 168 | masterv = br_vlan_find(vg, vid); |
169 | if (WARN_ON(!masterv)) | 169 | if (WARN_ON(!masterv)) |
170 | return NULL; | 170 | return NULL; |
171 | refcount_set(&masterv->refcnt, 1); | ||
172 | return masterv; | ||
171 | } | 173 | } |
172 | refcount_inc(&masterv->refcnt); | 174 | refcount_inc(&masterv->refcnt); |
173 | 175 | ||
diff --git a/net/bridge/netfilter/ebt_among.c b/net/bridge/netfilter/ebt_among.c index ce7152a12bd8..c5afb4232ecb 100644 --- a/net/bridge/netfilter/ebt_among.c +++ b/net/bridge/netfilter/ebt_among.c | |||
@@ -172,18 +172,35 @@ ebt_among_mt(const struct sk_buff *skb, struct xt_action_param *par) | |||
172 | return true; | 172 | return true; |
173 | } | 173 | } |
174 | 174 | ||
175 | static bool poolsize_invalid(const struct ebt_mac_wormhash *w) | ||
176 | { | ||
177 | return w && w->poolsize >= (INT_MAX / sizeof(struct ebt_mac_wormhash_tuple)); | ||
178 | } | ||
179 | |||
175 | static int ebt_among_mt_check(const struct xt_mtchk_param *par) | 180 | static int ebt_among_mt_check(const struct xt_mtchk_param *par) |
176 | { | 181 | { |
177 | const struct ebt_among_info *info = par->matchinfo; | 182 | const struct ebt_among_info *info = par->matchinfo; |
178 | const struct ebt_entry_match *em = | 183 | const struct ebt_entry_match *em = |
179 | container_of(par->matchinfo, const struct ebt_entry_match, data); | 184 | container_of(par->matchinfo, const struct ebt_entry_match, data); |
180 | int expected_length = sizeof(struct ebt_among_info); | 185 | unsigned int expected_length = sizeof(struct ebt_among_info); |
181 | const struct ebt_mac_wormhash *wh_dst, *wh_src; | 186 | const struct ebt_mac_wormhash *wh_dst, *wh_src; |
182 | int err; | 187 | int err; |
183 | 188 | ||
189 | if (expected_length > em->match_size) | ||
190 | return -EINVAL; | ||
191 | |||
184 | wh_dst = ebt_among_wh_dst(info); | 192 | wh_dst = ebt_among_wh_dst(info); |
185 | wh_src = ebt_among_wh_src(info); | 193 | if (poolsize_invalid(wh_dst)) |
194 | return -EINVAL; | ||
195 | |||
186 | expected_length += ebt_mac_wormhash_size(wh_dst); | 196 | expected_length += ebt_mac_wormhash_size(wh_dst); |
197 | if (expected_length > em->match_size) | ||
198 | return -EINVAL; | ||
199 | |||
200 | wh_src = ebt_among_wh_src(info); | ||
201 | if (poolsize_invalid(wh_src)) | ||
202 | return -EINVAL; | ||
203 | |||
187 | expected_length += ebt_mac_wormhash_size(wh_src); | 204 | expected_length += ebt_mac_wormhash_size(wh_src); |
188 | 205 | ||
189 | if (em->match_size != EBT_ALIGN(expected_length)) { | 206 | if (em->match_size != EBT_ALIGN(expected_length)) { |
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 02c4b409d317..254ef9f49567 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c | |||
@@ -1641,7 +1641,8 @@ static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr, | |||
1641 | int off = ebt_compat_match_offset(match, m->match_size); | 1641 | int off = ebt_compat_match_offset(match, m->match_size); |
1642 | compat_uint_t msize = m->match_size - off; | 1642 | compat_uint_t msize = m->match_size - off; |
1643 | 1643 | ||
1644 | BUG_ON(off >= m->match_size); | 1644 | if (WARN_ON(off >= m->match_size)) |
1645 | return -EINVAL; | ||
1645 | 1646 | ||
1646 | if (copy_to_user(cm->u.name, match->name, | 1647 | if (copy_to_user(cm->u.name, match->name, |
1647 | strlen(match->name) + 1) || put_user(msize, &cm->match_size)) | 1648 | strlen(match->name) + 1) || put_user(msize, &cm->match_size)) |
@@ -1671,7 +1672,8 @@ static int compat_target_to_user(struct ebt_entry_target *t, | |||
1671 | int off = xt_compat_target_offset(target); | 1672 | int off = xt_compat_target_offset(target); |
1672 | compat_uint_t tsize = t->target_size - off; | 1673 | compat_uint_t tsize = t->target_size - off; |
1673 | 1674 | ||
1674 | BUG_ON(off >= t->target_size); | 1675 | if (WARN_ON(off >= t->target_size)) |
1676 | return -EINVAL; | ||
1675 | 1677 | ||
1676 | if (copy_to_user(cm->u.name, target->name, | 1678 | if (copy_to_user(cm->u.name, target->name, |
1677 | strlen(target->name) + 1) || put_user(tsize, &cm->match_size)) | 1679 | strlen(target->name) + 1) || put_user(tsize, &cm->match_size)) |
@@ -1902,7 +1904,8 @@ static int ebt_buf_add(struct ebt_entries_buf_state *state, | |||
1902 | if (state->buf_kern_start == NULL) | 1904 | if (state->buf_kern_start == NULL) |
1903 | goto count_only; | 1905 | goto count_only; |
1904 | 1906 | ||
1905 | BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len); | 1907 | if (WARN_ON(state->buf_kern_offset + sz > state->buf_kern_len)) |
1908 | return -EINVAL; | ||
1906 | 1909 | ||
1907 | memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz); | 1910 | memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz); |
1908 | 1911 | ||
@@ -1915,7 +1918,8 @@ static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz) | |||
1915 | { | 1918 | { |
1916 | char *b = state->buf_kern_start; | 1919 | char *b = state->buf_kern_start; |
1917 | 1920 | ||
1918 | BUG_ON(b && state->buf_kern_offset > state->buf_kern_len); | 1921 | if (WARN_ON(b && state->buf_kern_offset > state->buf_kern_len)) |
1922 | return -EINVAL; | ||
1919 | 1923 | ||
1920 | if (b != NULL && sz > 0) | 1924 | if (b != NULL && sz > 0) |
1921 | memset(b + state->buf_kern_offset, 0, sz); | 1925 | memset(b + state->buf_kern_offset, 0, sz); |
@@ -1992,8 +1996,10 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt, | |||
1992 | pad = XT_ALIGN(size_kern) - size_kern; | 1996 | pad = XT_ALIGN(size_kern) - size_kern; |
1993 | 1997 | ||
1994 | if (pad > 0 && dst) { | 1998 | if (pad > 0 && dst) { |
1995 | BUG_ON(state->buf_kern_len <= pad); | 1999 | if (WARN_ON(state->buf_kern_len <= pad)) |
1996 | BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad); | 2000 | return -EINVAL; |
2001 | if (WARN_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad)) | ||
2002 | return -EINVAL; | ||
1997 | memset(dst + size_kern, 0, pad); | 2003 | memset(dst + size_kern, 0, pad); |
1998 | } | 2004 | } |
1999 | return off + match_size; | 2005 | return off + match_size; |
@@ -2043,7 +2049,8 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32, | |||
2043 | if (ret < 0) | 2049 | if (ret < 0) |
2044 | return ret; | 2050 | return ret; |
2045 | 2051 | ||
2046 | BUG_ON(ret < match32->match_size); | 2052 | if (WARN_ON(ret < match32->match_size)) |
2053 | return -EINVAL; | ||
2047 | growth += ret - match32->match_size; | 2054 | growth += ret - match32->match_size; |
2048 | growth += ebt_compat_entry_padsize(); | 2055 | growth += ebt_compat_entry_padsize(); |
2049 | 2056 | ||
@@ -2053,7 +2060,9 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32, | |||
2053 | if (match_kern) | 2060 | if (match_kern) |
2054 | match_kern->match_size = ret; | 2061 | match_kern->match_size = ret; |
2055 | 2062 | ||
2056 | WARN_ON(type == EBT_COMPAT_TARGET && size_left); | 2063 | if (WARN_ON(type == EBT_COMPAT_TARGET && size_left)) |
2064 | return -EINVAL; | ||
2065 | |||
2057 | match32 = (struct compat_ebt_entry_mwt *) buf; | 2066 | match32 = (struct compat_ebt_entry_mwt *) buf; |
2058 | } | 2067 | } |
2059 | 2068 | ||
@@ -2109,6 +2118,15 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base, | |||
2109 | * | 2118 | * |
2110 | * offsets are relative to beginning of struct ebt_entry (i.e., 0). | 2119 | * offsets are relative to beginning of struct ebt_entry (i.e., 0). |
2111 | */ | 2120 | */ |
2121 | for (i = 0; i < 4 ; ++i) { | ||
2122 | if (offsets[i] >= *total) | ||
2123 | return -EINVAL; | ||
2124 | if (i == 0) | ||
2125 | continue; | ||
2126 | if (offsets[i-1] > offsets[i]) | ||
2127 | return -EINVAL; | ||
2128 | } | ||
2129 | |||
2112 | for (i = 0, j = 1 ; j < 4 ; j++, i++) { | 2130 | for (i = 0, j = 1 ; j < 4 ; j++, i++) { |
2113 | struct compat_ebt_entry_mwt *match32; | 2131 | struct compat_ebt_entry_mwt *match32; |
2114 | unsigned int size; | 2132 | unsigned int size; |
@@ -2140,7 +2158,8 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base, | |||
2140 | 2158 | ||
2141 | startoff = state->buf_user_offset - startoff; | 2159 | startoff = state->buf_user_offset - startoff; |
2142 | 2160 | ||
2143 | BUG_ON(*total < startoff); | 2161 | if (WARN_ON(*total < startoff)) |
2162 | return -EINVAL; | ||
2144 | *total -= startoff; | 2163 | *total -= startoff; |
2145 | return 0; | 2164 | return 0; |
2146 | } | 2165 | } |
@@ -2267,7 +2286,8 @@ static int compat_do_replace(struct net *net, void __user *user, | |||
2267 | state.buf_kern_len = size64; | 2286 | state.buf_kern_len = size64; |
2268 | 2287 | ||
2269 | ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); | 2288 | ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); |
2270 | BUG_ON(ret < 0); /* parses same data again */ | 2289 | if (WARN_ON(ret < 0)) |
2290 | goto out_unlock; | ||
2271 | 2291 | ||
2272 | vfree(entries_tmp); | 2292 | vfree(entries_tmp); |
2273 | tmp.entries_size = size64; | 2293 | tmp.entries_size = size64; |
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c index 1e492ef2a33d..4d4c82229e9e 100644 --- a/net/ceph/ceph_common.c +++ b/net/ceph/ceph_common.c | |||
@@ -418,6 +418,7 @@ ceph_parse_options(char *options, const char *dev_name, | |||
418 | opt->flags |= CEPH_OPT_FSID; | 418 | opt->flags |= CEPH_OPT_FSID; |
419 | break; | 419 | break; |
420 | case Opt_name: | 420 | case Opt_name: |
421 | kfree(opt->name); | ||
421 | opt->name = kstrndup(argstr[0].from, | 422 | opt->name = kstrndup(argstr[0].from, |
422 | argstr[0].to-argstr[0].from, | 423 | argstr[0].to-argstr[0].from, |
423 | GFP_KERNEL); | 424 | GFP_KERNEL); |
@@ -427,6 +428,9 @@ ceph_parse_options(char *options, const char *dev_name, | |||
427 | } | 428 | } |
428 | break; | 429 | break; |
429 | case Opt_secret: | 430 | case Opt_secret: |
431 | ceph_crypto_key_destroy(opt->key); | ||
432 | kfree(opt->key); | ||
433 | |||
430 | opt->key = kzalloc(sizeof(*opt->key), GFP_KERNEL); | 434 | opt->key = kzalloc(sizeof(*opt->key), GFP_KERNEL); |
431 | if (!opt->key) { | 435 | if (!opt->key) { |
432 | err = -ENOMEM; | 436 | err = -ENOMEM; |
@@ -437,6 +441,9 @@ ceph_parse_options(char *options, const char *dev_name, | |||
437 | goto out; | 441 | goto out; |
438 | break; | 442 | break; |
439 | case Opt_key: | 443 | case Opt_key: |
444 | ceph_crypto_key_destroy(opt->key); | ||
445 | kfree(opt->key); | ||
446 | |||
440 | opt->key = kzalloc(sizeof(*opt->key), GFP_KERNEL); | 447 | opt->key = kzalloc(sizeof(*opt->key), GFP_KERNEL); |
441 | if (!opt->key) { | 448 | if (!opt->key) { |
442 | err = -ENOMEM; | 449 | err = -ENOMEM; |
diff --git a/net/core/dev.c b/net/core/dev.c index d4362befe7e2..2cedf520cb28 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -6396,6 +6396,7 @@ static int __netdev_upper_dev_link(struct net_device *dev, | |||
6396 | .linking = true, | 6396 | .linking = true, |
6397 | .upper_info = upper_info, | 6397 | .upper_info = upper_info, |
6398 | }; | 6398 | }; |
6399 | struct net_device *master_dev; | ||
6399 | int ret = 0; | 6400 | int ret = 0; |
6400 | 6401 | ||
6401 | ASSERT_RTNL(); | 6402 | ASSERT_RTNL(); |
@@ -6407,11 +6408,14 @@ static int __netdev_upper_dev_link(struct net_device *dev, | |||
6407 | if (netdev_has_upper_dev(upper_dev, dev)) | 6408 | if (netdev_has_upper_dev(upper_dev, dev)) |
6408 | return -EBUSY; | 6409 | return -EBUSY; |
6409 | 6410 | ||
6410 | if (netdev_has_upper_dev(dev, upper_dev)) | 6411 | if (!master) { |
6411 | return -EEXIST; | 6412 | if (netdev_has_upper_dev(dev, upper_dev)) |
6412 | 6413 | return -EEXIST; | |
6413 | if (master && netdev_master_upper_dev_get(dev)) | 6414 | } else { |
6414 | return -EBUSY; | 6415 | master_dev = netdev_master_upper_dev_get(dev); |
6416 | if (master_dev) | ||
6417 | return master_dev == upper_dev ? -EEXIST : -EBUSY; | ||
6418 | } | ||
6415 | 6419 | ||
6416 | ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, | 6420 | ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, |
6417 | &changeupper_info.info); | 6421 | &changeupper_info.info); |
diff --git a/net/core/devlink.c b/net/core/devlink.c index 18d385ed8237..2f2307d94787 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c | |||
@@ -1695,10 +1695,11 @@ static int devlink_dpipe_table_put(struct sk_buff *skb, | |||
1695 | goto nla_put_failure; | 1695 | goto nla_put_failure; |
1696 | 1696 | ||
1697 | if (table->resource_valid) { | 1697 | if (table->resource_valid) { |
1698 | nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID, | 1698 | if (nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID, |
1699 | table->resource_id, DEVLINK_ATTR_PAD); | 1699 | table->resource_id, DEVLINK_ATTR_PAD) || |
1700 | nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS, | 1700 | nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS, |
1701 | table->resource_units, DEVLINK_ATTR_PAD); | 1701 | table->resource_units, DEVLINK_ATTR_PAD)) |
1702 | goto nla_put_failure; | ||
1702 | } | 1703 | } |
1703 | if (devlink_dpipe_matches_put(table, skb)) | 1704 | if (devlink_dpipe_matches_put(table, skb)) |
1704 | goto nla_put_failure; | 1705 | goto nla_put_failure; |
@@ -2332,7 +2333,7 @@ devlink_resource_validate_children(struct devlink_resource *resource) | |||
2332 | list_for_each_entry(child_resource, &resource->resource_list, list) | 2333 | list_for_each_entry(child_resource, &resource->resource_list, list) |
2333 | parts_size += child_resource->size_new; | 2334 | parts_size += child_resource->size_new; |
2334 | 2335 | ||
2335 | if (parts_size > resource->size) | 2336 | if (parts_size > resource->size_new) |
2336 | size_valid = false; | 2337 | size_valid = false; |
2337 | out: | 2338 | out: |
2338 | resource->size_valid = size_valid; | 2339 | resource->size_valid = size_valid; |
@@ -2372,20 +2373,22 @@ static int devlink_nl_cmd_resource_set(struct sk_buff *skb, | |||
2372 | return 0; | 2373 | return 0; |
2373 | } | 2374 | } |
2374 | 2375 | ||
2375 | static void | 2376 | static int |
2376 | devlink_resource_size_params_put(struct devlink_resource *resource, | 2377 | devlink_resource_size_params_put(struct devlink_resource *resource, |
2377 | struct sk_buff *skb) | 2378 | struct sk_buff *skb) |
2378 | { | 2379 | { |
2379 | struct devlink_resource_size_params *size_params; | 2380 | struct devlink_resource_size_params *size_params; |
2380 | 2381 | ||
2381 | size_params = resource->size_params; | 2382 | size_params = &resource->size_params; |
2382 | nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_GRAN, | 2383 | if (nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_GRAN, |
2383 | size_params->size_granularity, DEVLINK_ATTR_PAD); | 2384 | size_params->size_granularity, DEVLINK_ATTR_PAD) || |
2384 | nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MAX, | 2385 | nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MAX, |
2385 | size_params->size_max, DEVLINK_ATTR_PAD); | 2386 | size_params->size_max, DEVLINK_ATTR_PAD) || |
2386 | nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MIN, | 2387 | nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MIN, |
2387 | size_params->size_min, DEVLINK_ATTR_PAD); | 2388 | size_params->size_min, DEVLINK_ATTR_PAD) || |
2388 | nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_UNIT, size_params->unit); | 2389 | nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_UNIT, size_params->unit)) |
2390 | return -EMSGSIZE; | ||
2391 | return 0; | ||
2389 | } | 2392 | } |
2390 | 2393 | ||
2391 | static int devlink_resource_put(struct devlink *devlink, struct sk_buff *skb, | 2394 | static int devlink_resource_put(struct devlink *devlink, struct sk_buff *skb, |
@@ -2409,10 +2412,12 @@ static int devlink_resource_put(struct devlink *devlink, struct sk_buff *skb, | |||
2409 | nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_NEW, | 2412 | nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_NEW, |
2410 | resource->size_new, DEVLINK_ATTR_PAD); | 2413 | resource->size_new, DEVLINK_ATTR_PAD); |
2411 | if (resource->resource_ops && resource->resource_ops->occ_get) | 2414 | if (resource->resource_ops && resource->resource_ops->occ_get) |
2412 | nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_OCC, | 2415 | if (nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_OCC, |
2413 | resource->resource_ops->occ_get(devlink), | 2416 | resource->resource_ops->occ_get(devlink), |
2414 | DEVLINK_ATTR_PAD); | 2417 | DEVLINK_ATTR_PAD)) |
2415 | devlink_resource_size_params_put(resource, skb); | 2418 | goto nla_put_failure; |
2419 | if (devlink_resource_size_params_put(resource, skb)) | ||
2420 | goto nla_put_failure; | ||
2416 | if (list_empty(&resource->resource_list)) | 2421 | if (list_empty(&resource->resource_list)) |
2417 | goto out; | 2422 | goto out; |
2418 | 2423 | ||
@@ -3151,7 +3156,7 @@ int devlink_resource_register(struct devlink *devlink, | |||
3151 | u64 resource_size, | 3156 | u64 resource_size, |
3152 | u64 resource_id, | 3157 | u64 resource_id, |
3153 | u64 parent_resource_id, | 3158 | u64 parent_resource_id, |
3154 | struct devlink_resource_size_params *size_params, | 3159 | const struct devlink_resource_size_params *size_params, |
3155 | const struct devlink_resource_ops *resource_ops) | 3160 | const struct devlink_resource_ops *resource_ops) |
3156 | { | 3161 | { |
3157 | struct devlink_resource *resource; | 3162 | struct devlink_resource *resource; |
@@ -3194,7 +3199,8 @@ int devlink_resource_register(struct devlink *devlink, | |||
3194 | resource->id = resource_id; | 3199 | resource->id = resource_id; |
3195 | resource->resource_ops = resource_ops; | 3200 | resource->resource_ops = resource_ops; |
3196 | resource->size_valid = true; | 3201 | resource->size_valid = true; |
3197 | resource->size_params = size_params; | 3202 | memcpy(&resource->size_params, size_params, |
3203 | sizeof(resource->size_params)); | ||
3198 | INIT_LIST_HEAD(&resource->resource_list); | 3204 | INIT_LIST_HEAD(&resource->resource_list); |
3199 | list_add_tail(&resource->list, resource_list); | 3205 | list_add_tail(&resource->list, resource_list); |
3200 | out: | 3206 | out: |
diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 494e6a5d7306..3f89c76d5c24 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c | |||
@@ -2520,11 +2520,14 @@ static int set_phy_tunable(struct net_device *dev, void __user *useraddr) | |||
2520 | static int ethtool_get_fecparam(struct net_device *dev, void __user *useraddr) | 2520 | static int ethtool_get_fecparam(struct net_device *dev, void __user *useraddr) |
2521 | { | 2521 | { |
2522 | struct ethtool_fecparam fecparam = { ETHTOOL_GFECPARAM }; | 2522 | struct ethtool_fecparam fecparam = { ETHTOOL_GFECPARAM }; |
2523 | int rc; | ||
2523 | 2524 | ||
2524 | if (!dev->ethtool_ops->get_fecparam) | 2525 | if (!dev->ethtool_ops->get_fecparam) |
2525 | return -EOPNOTSUPP; | 2526 | return -EOPNOTSUPP; |
2526 | 2527 | ||
2527 | dev->ethtool_ops->get_fecparam(dev, &fecparam); | 2528 | rc = dev->ethtool_ops->get_fecparam(dev, &fecparam); |
2529 | if (rc) | ||
2530 | return rc; | ||
2528 | 2531 | ||
2529 | if (copy_to_user(useraddr, &fecparam, sizeof(fecparam))) | 2532 | if (copy_to_user(useraddr, &fecparam, sizeof(fecparam))) |
2530 | return -EFAULT; | 2533 | return -EFAULT; |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 09bd89c90a71..0bb0d8877954 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -4891,7 +4891,7 @@ EXPORT_SYMBOL_GPL(skb_scrub_packet); | |||
4891 | * | 4891 | * |
4892 | * The MAC/L2 or network (IP, IPv6) headers are not accounted for. | 4892 | * The MAC/L2 or network (IP, IPv6) headers are not accounted for. |
4893 | */ | 4893 | */ |
4894 | unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) | 4894 | static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) |
4895 | { | 4895 | { |
4896 | const struct skb_shared_info *shinfo = skb_shinfo(skb); | 4896 | const struct skb_shared_info *shinfo = skb_shinfo(skb); |
4897 | unsigned int thlen = 0; | 4897 | unsigned int thlen = 0; |
@@ -4913,7 +4913,40 @@ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) | |||
4913 | */ | 4913 | */ |
4914 | return thlen + shinfo->gso_size; | 4914 | return thlen + shinfo->gso_size; |
4915 | } | 4915 | } |
4916 | EXPORT_SYMBOL_GPL(skb_gso_transport_seglen); | 4916 | |
4917 | /** | ||
4918 | * skb_gso_network_seglen - Return length of individual segments of a gso packet | ||
4919 | * | ||
4920 | * @skb: GSO skb | ||
4921 | * | ||
4922 | * skb_gso_network_seglen is used to determine the real size of the | ||
4923 | * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP). | ||
4924 | * | ||
4925 | * The MAC/L2 header is not accounted for. | ||
4926 | */ | ||
4927 | static unsigned int skb_gso_network_seglen(const struct sk_buff *skb) | ||
4928 | { | ||
4929 | unsigned int hdr_len = skb_transport_header(skb) - | ||
4930 | skb_network_header(skb); | ||
4931 | |||
4932 | return hdr_len + skb_gso_transport_seglen(skb); | ||
4933 | } | ||
4934 | |||
4935 | /** | ||
4936 | * skb_gso_mac_seglen - Return length of individual segments of a gso packet | ||
4937 | * | ||
4938 | * @skb: GSO skb | ||
4939 | * | ||
4940 | * skb_gso_mac_seglen is used to determine the real size of the | ||
4941 | * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4 | ||
4942 | * headers (TCP/UDP). | ||
4943 | */ | ||
4944 | static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb) | ||
4945 | { | ||
4946 | unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb); | ||
4947 | |||
4948 | return hdr_len + skb_gso_transport_seglen(skb); | ||
4949 | } | ||
4917 | 4950 | ||
4918 | /** | 4951 | /** |
4919 | * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS | 4952 | * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS |
@@ -4955,19 +4988,20 @@ static inline bool skb_gso_size_check(const struct sk_buff *skb, | |||
4955 | } | 4988 | } |
4956 | 4989 | ||
4957 | /** | 4990 | /** |
4958 | * skb_gso_validate_mtu - Return in case such skb fits a given MTU | 4991 | * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU? |
4959 | * | 4992 | * |
4960 | * @skb: GSO skb | 4993 | * @skb: GSO skb |
4961 | * @mtu: MTU to validate against | 4994 | * @mtu: MTU to validate against |
4962 | * | 4995 | * |
4963 | * skb_gso_validate_mtu validates if a given skb will fit a wanted MTU | 4996 | * skb_gso_validate_network_len validates if a given skb will fit a |
4964 | * once split. | 4997 | * wanted MTU once split. It considers L3 headers, L4 headers, and the |
4998 | * payload. | ||
4965 | */ | 4999 | */ |
4966 | bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu) | 5000 | bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu) |
4967 | { | 5001 | { |
4968 | return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu); | 5002 | return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu); |
4969 | } | 5003 | } |
4970 | EXPORT_SYMBOL_GPL(skb_gso_validate_mtu); | 5004 | EXPORT_SYMBOL_GPL(skb_gso_validate_network_len); |
4971 | 5005 | ||
4972 | /** | 5006 | /** |
4973 | * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length? | 5007 | * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length? |
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index 2dd21c3281a1..b54b948b0596 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c | |||
@@ -55,7 +55,7 @@ static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu) | |||
55 | if (skb->ignore_df) | 55 | if (skb->ignore_df) |
56 | return false; | 56 | return false; |
57 | 57 | ||
58 | if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu)) | 58 | if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu)) |
59 | return false; | 59 | return false; |
60 | 60 | ||
61 | return true; | 61 | return true; |
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 45d97e9b2759..0901de42ed85 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -970,9 +970,6 @@ static void __gre_tunnel_init(struct net_device *dev) | |||
970 | 970 | ||
971 | t_hlen = tunnel->hlen + sizeof(struct iphdr); | 971 | t_hlen = tunnel->hlen + sizeof(struct iphdr); |
972 | 972 | ||
973 | dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4; | ||
974 | dev->mtu = ETH_DATA_LEN - t_hlen - 4; | ||
975 | |||
976 | dev->features |= GRE_FEATURES; | 973 | dev->features |= GRE_FEATURES; |
977 | dev->hw_features |= GRE_FEATURES; | 974 | dev->hw_features |= GRE_FEATURES; |
978 | 975 | ||
@@ -1290,8 +1287,6 @@ static int erspan_tunnel_init(struct net_device *dev) | |||
1290 | erspan_hdr_len(tunnel->erspan_ver); | 1287 | erspan_hdr_len(tunnel->erspan_ver); |
1291 | t_hlen = tunnel->hlen + sizeof(struct iphdr); | 1288 | t_hlen = tunnel->hlen + sizeof(struct iphdr); |
1292 | 1289 | ||
1293 | dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4; | ||
1294 | dev->mtu = ETH_DATA_LEN - t_hlen - 4; | ||
1295 | dev->features |= GRE_FEATURES; | 1290 | dev->features |= GRE_FEATURES; |
1296 | dev->hw_features |= GRE_FEATURES; | 1291 | dev->hw_features |= GRE_FEATURES; |
1297 | dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; | 1292 | dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; |
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index e8e675be60ec..66340ab750e6 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
@@ -248,7 +248,7 @@ static int ip_finish_output_gso(struct net *net, struct sock *sk, | |||
248 | 248 | ||
249 | /* common case: seglen is <= mtu | 249 | /* common case: seglen is <= mtu |
250 | */ | 250 | */ |
251 | if (skb_gso_validate_mtu(skb, mtu)) | 251 | if (skb_gso_validate_network_len(skb, mtu)) |
252 | return ip_finish_output2(net, sk, skb); | 252 | return ip_finish_output2(net, sk, skb); |
253 | 253 | ||
254 | /* Slowpath - GSO segment length exceeds the egress MTU. | 254 | /* Slowpath - GSO segment length exceeds the egress MTU. |
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index d786a8441bce..6d21068f9b55 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c | |||
@@ -710,16 +710,9 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, | |||
710 | } | 710 | } |
711 | } | 711 | } |
712 | 712 | ||
713 | if (tunnel->fwmark) { | 713 | init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr, |
714 | init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr, | 714 | tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link, |
715 | tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link, | 715 | tunnel->fwmark); |
716 | tunnel->fwmark); | ||
717 | } | ||
718 | else { | ||
719 | init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr, | ||
720 | tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link, | ||
721 | skb->mark); | ||
722 | } | ||
723 | 716 | ||
724 | if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0) | 717 | if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0) |
725 | goto tx_error; | 718 | goto tx_error; |
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index 4b02ab39ebc5..8a8ae61cea71 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c | |||
@@ -232,7 +232,6 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i, | |||
232 | c->hash_mode = i->hash_mode; | 232 | c->hash_mode = i->hash_mode; |
233 | c->hash_initval = i->hash_initval; | 233 | c->hash_initval = i->hash_initval; |
234 | refcount_set(&c->refcount, 1); | 234 | refcount_set(&c->refcount, 1); |
235 | refcount_set(&c->entries, 1); | ||
236 | 235 | ||
237 | spin_lock_bh(&cn->lock); | 236 | spin_lock_bh(&cn->lock); |
238 | if (__clusterip_config_find(net, ip)) { | 237 | if (__clusterip_config_find(net, ip)) { |
@@ -263,8 +262,10 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i, | |||
263 | 262 | ||
264 | c->notifier.notifier_call = clusterip_netdev_event; | 263 | c->notifier.notifier_call = clusterip_netdev_event; |
265 | err = register_netdevice_notifier(&c->notifier); | 264 | err = register_netdevice_notifier(&c->notifier); |
266 | if (!err) | 265 | if (!err) { |
266 | refcount_set(&c->entries, 1); | ||
267 | return c; | 267 | return c; |
268 | } | ||
268 | 269 | ||
269 | #ifdef CONFIG_PROC_FS | 270 | #ifdef CONFIG_PROC_FS |
270 | proc_remove(c->pde); | 271 | proc_remove(c->pde); |
@@ -273,7 +274,7 @@ err: | |||
273 | spin_lock_bh(&cn->lock); | 274 | spin_lock_bh(&cn->lock); |
274 | list_del_rcu(&c->list); | 275 | list_del_rcu(&c->list); |
275 | spin_unlock_bh(&cn->lock); | 276 | spin_unlock_bh(&cn->lock); |
276 | kfree(c); | 277 | clusterip_config_put(c); |
277 | 278 | ||
278 | return ERR_PTR(err); | 279 | return ERR_PTR(err); |
279 | } | 280 | } |
@@ -496,12 +497,15 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par) | |||
496 | return PTR_ERR(config); | 497 | return PTR_ERR(config); |
497 | } | 498 | } |
498 | } | 499 | } |
499 | cipinfo->config = config; | ||
500 | 500 | ||
501 | ret = nf_ct_netns_get(par->net, par->family); | 501 | ret = nf_ct_netns_get(par->net, par->family); |
502 | if (ret < 0) | 502 | if (ret < 0) { |
503 | pr_info("cannot load conntrack support for proto=%u\n", | 503 | pr_info("cannot load conntrack support for proto=%u\n", |
504 | par->family); | 504 | par->family); |
505 | clusterip_config_entry_put(par->net, config); | ||
506 | clusterip_config_put(config); | ||
507 | return ret; | ||
508 | } | ||
505 | 509 | ||
506 | if (!par->net->xt.clusterip_deprecated_warning) { | 510 | if (!par->net->xt.clusterip_deprecated_warning) { |
507 | pr_info("ipt_CLUSTERIP is deprecated and it will removed soon, " | 511 | pr_info("ipt_CLUSTERIP is deprecated and it will removed soon, " |
@@ -509,6 +513,7 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par) | |||
509 | par->net->xt.clusterip_deprecated_warning = true; | 513 | par->net->xt.clusterip_deprecated_warning = true; |
510 | } | 514 | } |
511 | 515 | ||
516 | cipinfo->config = config; | ||
512 | return ret; | 517 | return ret; |
513 | } | 518 | } |
514 | 519 | ||
diff --git a/net/ipv4/netfilter/nf_flow_table_ipv4.c b/net/ipv4/netfilter/nf_flow_table_ipv4.c index 25d2975da156..0cd46bffa469 100644 --- a/net/ipv4/netfilter/nf_flow_table_ipv4.c +++ b/net/ipv4/netfilter/nf_flow_table_ipv4.c | |||
@@ -111,6 +111,7 @@ static int nf_flow_dnat_ip(const struct flow_offload *flow, struct sk_buff *skb, | |||
111 | default: | 111 | default: |
112 | return -1; | 112 | return -1; |
113 | } | 113 | } |
114 | csum_replace4(&iph->check, addr, new_addr); | ||
114 | 115 | ||
115 | return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr); | 116 | return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr); |
116 | } | 117 | } |
@@ -185,7 +186,7 @@ static bool __nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu) | |||
185 | if ((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) | 186 | if ((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) |
186 | return false; | 187 | return false; |
187 | 188 | ||
188 | if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu)) | 189 | if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu)) |
189 | return false; | 190 | return false; |
190 | 191 | ||
191 | return true; | 192 | return true; |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index a4f44d815a61..860b3fd2f54b 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -128,10 +128,11 @@ static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1)); | |||
128 | static int ip_rt_error_cost __read_mostly = HZ; | 128 | static int ip_rt_error_cost __read_mostly = HZ; |
129 | static int ip_rt_error_burst __read_mostly = 5 * HZ; | 129 | static int ip_rt_error_burst __read_mostly = 5 * HZ; |
130 | static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ; | 130 | static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ; |
131 | static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20; | 131 | static u32 ip_rt_min_pmtu __read_mostly = 512 + 20 + 20; |
132 | static int ip_rt_min_advmss __read_mostly = 256; | 132 | static int ip_rt_min_advmss __read_mostly = 256; |
133 | 133 | ||
134 | static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT; | 134 | static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT; |
135 | |||
135 | /* | 136 | /* |
136 | * Interface to generic destination cache. | 137 | * Interface to generic destination cache. |
137 | */ | 138 | */ |
@@ -930,14 +931,23 @@ out_put_peer: | |||
930 | 931 | ||
931 | static int ip_error(struct sk_buff *skb) | 932 | static int ip_error(struct sk_buff *skb) |
932 | { | 933 | { |
933 | struct in_device *in_dev = __in_dev_get_rcu(skb->dev); | ||
934 | struct rtable *rt = skb_rtable(skb); | 934 | struct rtable *rt = skb_rtable(skb); |
935 | struct net_device *dev = skb->dev; | ||
936 | struct in_device *in_dev; | ||
935 | struct inet_peer *peer; | 937 | struct inet_peer *peer; |
936 | unsigned long now; | 938 | unsigned long now; |
937 | struct net *net; | 939 | struct net *net; |
938 | bool send; | 940 | bool send; |
939 | int code; | 941 | int code; |
940 | 942 | ||
943 | if (netif_is_l3_master(skb->dev)) { | ||
944 | dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif); | ||
945 | if (!dev) | ||
946 | goto out; | ||
947 | } | ||
948 | |||
949 | in_dev = __in_dev_get_rcu(dev); | ||
950 | |||
941 | /* IP on this device is disabled. */ | 951 | /* IP on this device is disabled. */ |
942 | if (!in_dev) | 952 | if (!in_dev) |
943 | goto out; | 953 | goto out; |
@@ -2818,6 +2828,7 @@ void ip_rt_multicast_event(struct in_device *in_dev) | |||
2818 | static int ip_rt_gc_interval __read_mostly = 60 * HZ; | 2828 | static int ip_rt_gc_interval __read_mostly = 60 * HZ; |
2819 | static int ip_rt_gc_min_interval __read_mostly = HZ / 2; | 2829 | static int ip_rt_gc_min_interval __read_mostly = HZ / 2; |
2820 | static int ip_rt_gc_elasticity __read_mostly = 8; | 2830 | static int ip_rt_gc_elasticity __read_mostly = 8; |
2831 | static int ip_min_valid_pmtu __read_mostly = IPV4_MIN_MTU; | ||
2821 | 2832 | ||
2822 | static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write, | 2833 | static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write, |
2823 | void __user *buffer, | 2834 | void __user *buffer, |
@@ -2933,7 +2944,8 @@ static struct ctl_table ipv4_route_table[] = { | |||
2933 | .data = &ip_rt_min_pmtu, | 2944 | .data = &ip_rt_min_pmtu, |
2934 | .maxlen = sizeof(int), | 2945 | .maxlen = sizeof(int), |
2935 | .mode = 0644, | 2946 | .mode = 0644, |
2936 | .proc_handler = proc_dointvec, | 2947 | .proc_handler = proc_dointvec_minmax, |
2948 | .extra1 = &ip_min_valid_pmtu, | ||
2937 | }, | 2949 | }, |
2938 | { | 2950 | { |
2939 | .procname = "min_adv_mss", | 2951 | .procname = "min_adv_mss", |
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c index 7c843578f233..faddf4f9a707 100644 --- a/net/ipv4/tcp_illinois.c +++ b/net/ipv4/tcp_illinois.c | |||
@@ -6,7 +6,7 @@ | |||
6 | * The algorithm is described in: | 6 | * The algorithm is described in: |
7 | * "TCP-Illinois: A Loss and Delay-Based Congestion Control Algorithm | 7 | * "TCP-Illinois: A Loss and Delay-Based Congestion Control Algorithm |
8 | * for High-Speed Networks" | 8 | * for High-Speed Networks" |
9 | * http://www.ifp.illinois.edu/~srikant/Papers/liubassri06perf.pdf | 9 | * http://tamerbasar.csl.illinois.edu/LiuBasarSrikantPerfEvalArtJun2008.pdf |
10 | * | 10 | * |
11 | * Implemented from description in paper and ns-2 simulation. | 11 | * Implemented from description in paper and ns-2 simulation. |
12 | * Copyright (C) 2007 Stephen Hemminger <shemminger@linux-foundation.org> | 12 | * Copyright (C) 2007 Stephen Hemminger <shemminger@linux-foundation.org> |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 575d3c1fb6e8..9a1b3c1c1c14 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -1971,11 +1971,6 @@ void tcp_enter_loss(struct sock *sk) | |||
1971 | /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous | 1971 | /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous |
1972 | * loss recovery is underway except recurring timeout(s) on | 1972 | * loss recovery is underway except recurring timeout(s) on |
1973 | * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing | 1973 | * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing |
1974 | * | ||
1975 | * In theory F-RTO can be used repeatedly during loss recovery. | ||
1976 | * In practice this interacts badly with broken middle-boxes that | ||
1977 | * falsely raise the receive window, which results in repeated | ||
1978 | * timeouts and stop-and-go behavior. | ||
1979 | */ | 1974 | */ |
1980 | tp->frto = net->ipv4.sysctl_tcp_frto && | 1975 | tp->frto = net->ipv4.sysctl_tcp_frto && |
1981 | (new_recovery || icsk->icsk_retransmits) && | 1976 | (new_recovery || icsk->icsk_retransmits) && |
@@ -2631,18 +2626,14 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack, | |||
2631 | tcp_try_undo_loss(sk, false)) | 2626 | tcp_try_undo_loss(sk, false)) |
2632 | return; | 2627 | return; |
2633 | 2628 | ||
2634 | /* The ACK (s)acks some never-retransmitted data meaning not all | ||
2635 | * the data packets before the timeout were lost. Therefore we | ||
2636 | * undo the congestion window and state. This is essentially | ||
2637 | * the operation in F-RTO (RFC5682 section 3.1 step 3.b). Since | ||
2638 | * a retransmitted skb is permantly marked, we can apply such an | ||
2639 | * operation even if F-RTO was not used. | ||
2640 | */ | ||
2641 | if ((flag & FLAG_ORIG_SACK_ACKED) && | ||
2642 | tcp_try_undo_loss(sk, tp->undo_marker)) | ||
2643 | return; | ||
2644 | |||
2645 | if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */ | 2629 | if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */ |
2630 | /* Step 3.b. A timeout is spurious if not all data are | ||
2631 | * lost, i.e., never-retransmitted data are (s)acked. | ||
2632 | */ | ||
2633 | if ((flag & FLAG_ORIG_SACK_ACKED) && | ||
2634 | tcp_try_undo_loss(sk, true)) | ||
2635 | return; | ||
2636 | |||
2646 | if (after(tp->snd_nxt, tp->high_seq)) { | 2637 | if (after(tp->snd_nxt, tp->high_seq)) { |
2647 | if (flag & FLAG_DATA_SACKED || is_dupack) | 2638 | if (flag & FLAG_DATA_SACKED || is_dupack) |
2648 | tp->frto = 0; /* Step 3.a. loss was real */ | 2639 | tp->frto = 0; /* Step 3.a. loss was real */ |
@@ -4001,6 +3992,7 @@ void tcp_reset(struct sock *sk) | |||
4001 | /* This barrier is coupled with smp_rmb() in tcp_poll() */ | 3992 | /* This barrier is coupled with smp_rmb() in tcp_poll() */ |
4002 | smp_wmb(); | 3993 | smp_wmb(); |
4003 | 3994 | ||
3995 | tcp_write_queue_purge(sk); | ||
4004 | tcp_done(sk); | 3996 | tcp_done(sk); |
4005 | 3997 | ||
4006 | if (!sock_flag(sk, SOCK_DEAD)) | 3998 | if (!sock_flag(sk, SOCK_DEAD)) |
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c index 94b8702603bc..be980c195fc5 100644 --- a/net/ipv4/xfrm4_output.c +++ b/net/ipv4/xfrm4_output.c | |||
@@ -30,7 +30,8 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb) | |||
30 | 30 | ||
31 | mtu = dst_mtu(skb_dst(skb)); | 31 | mtu = dst_mtu(skb_dst(skb)); |
32 | if ((!skb_is_gso(skb) && skb->len > mtu) || | 32 | if ((!skb_is_gso(skb) && skb->len > mtu) || |
33 | (skb_is_gso(skb) && skb_gso_network_seglen(skb) > ip_skb_dst_mtu(skb->sk, skb))) { | 33 | (skb_is_gso(skb) && |
34 | !skb_gso_validate_network_len(skb, ip_skb_dst_mtu(skb->sk, skb)))) { | ||
34 | skb->protocol = htons(ETH_P_IP); | 35 | skb->protocol = htons(ETH_P_IP); |
35 | 36 | ||
36 | if (skb->sk) | 37 | if (skb->sk) |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 997c7f19ad62..a8a919520090 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -412,7 +412,7 @@ static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu) | |||
412 | if (skb->ignore_df) | 412 | if (skb->ignore_df) |
413 | return false; | 413 | return false; |
414 | 414 | ||
415 | if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu)) | 415 | if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu)) |
416 | return false; | 416 | return false; |
417 | 417 | ||
418 | return true; | 418 | return true; |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 4b15fe928278..6e0f21eed88a 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -1982,14 +1982,14 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev, | |||
1982 | { | 1982 | { |
1983 | struct net *net = dev_net(dev); | 1983 | struct net *net = dev_net(dev); |
1984 | struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); | 1984 | struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); |
1985 | struct ip6_tnl *nt, *t; | ||
1986 | struct ip_tunnel_encap ipencap; | 1985 | struct ip_tunnel_encap ipencap; |
1986 | struct ip6_tnl *nt, *t; | ||
1987 | int err; | ||
1987 | 1988 | ||
1988 | nt = netdev_priv(dev); | 1989 | nt = netdev_priv(dev); |
1989 | 1990 | ||
1990 | if (ip6_tnl_netlink_encap_parms(data, &ipencap)) { | 1991 | if (ip6_tnl_netlink_encap_parms(data, &ipencap)) { |
1991 | int err = ip6_tnl_encap_setup(nt, &ipencap); | 1992 | err = ip6_tnl_encap_setup(nt, &ipencap); |
1992 | |||
1993 | if (err < 0) | 1993 | if (err < 0) |
1994 | return err; | 1994 | return err; |
1995 | } | 1995 | } |
@@ -2005,7 +2005,11 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev, | |||
2005 | return -EEXIST; | 2005 | return -EEXIST; |
2006 | } | 2006 | } |
2007 | 2007 | ||
2008 | return ip6_tnl_create2(dev); | 2008 | err = ip6_tnl_create2(dev); |
2009 | if (!err && tb[IFLA_MTU]) | ||
2010 | ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU])); | ||
2011 | |||
2012 | return err; | ||
2009 | } | 2013 | } |
2010 | 2014 | ||
2011 | static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[], | 2015 | static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[], |
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c index d95ceca7ff8f..531d6957af36 100644 --- a/net/ipv6/netfilter.c +++ b/net/ipv6/netfilter.c | |||
@@ -21,18 +21,19 @@ | |||
21 | int ip6_route_me_harder(struct net *net, struct sk_buff *skb) | 21 | int ip6_route_me_harder(struct net *net, struct sk_buff *skb) |
22 | { | 22 | { |
23 | const struct ipv6hdr *iph = ipv6_hdr(skb); | 23 | const struct ipv6hdr *iph = ipv6_hdr(skb); |
24 | struct sock *sk = sk_to_full_sk(skb->sk); | ||
24 | unsigned int hh_len; | 25 | unsigned int hh_len; |
25 | struct dst_entry *dst; | 26 | struct dst_entry *dst; |
26 | struct flowi6 fl6 = { | 27 | struct flowi6 fl6 = { |
27 | .flowi6_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0, | 28 | .flowi6_oif = sk ? sk->sk_bound_dev_if : 0, |
28 | .flowi6_mark = skb->mark, | 29 | .flowi6_mark = skb->mark, |
29 | .flowi6_uid = sock_net_uid(net, skb->sk), | 30 | .flowi6_uid = sock_net_uid(net, sk), |
30 | .daddr = iph->daddr, | 31 | .daddr = iph->daddr, |
31 | .saddr = iph->saddr, | 32 | .saddr = iph->saddr, |
32 | }; | 33 | }; |
33 | int err; | 34 | int err; |
34 | 35 | ||
35 | dst = ip6_route_output(net, skb->sk, &fl6); | 36 | dst = ip6_route_output(net, sk, &fl6); |
36 | err = dst->error; | 37 | err = dst->error; |
37 | if (err) { | 38 | if (err) { |
38 | IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); | 39 | IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); |
@@ -50,7 +51,7 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb) | |||
50 | if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && | 51 | if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && |
51 | xfrm_decode_session(skb, flowi6_to_flowi(&fl6), AF_INET6) == 0) { | 52 | xfrm_decode_session(skb, flowi6_to_flowi(&fl6), AF_INET6) == 0) { |
52 | skb_dst_set(skb, NULL); | 53 | skb_dst_set(skb, NULL); |
53 | dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), skb->sk, 0); | 54 | dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0); |
54 | if (IS_ERR(dst)) | 55 | if (IS_ERR(dst)) |
55 | return PTR_ERR(dst); | 56 | return PTR_ERR(dst); |
56 | skb_dst_set(skb, dst); | 57 | skb_dst_set(skb, dst); |
diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c index 94deb69bbbda..91ed25a24b79 100644 --- a/net/ipv6/netfilter/ip6t_rpfilter.c +++ b/net/ipv6/netfilter/ip6t_rpfilter.c | |||
@@ -48,10 +48,6 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb, | |||
48 | } | 48 | } |
49 | 49 | ||
50 | fl6.flowi6_mark = flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0; | 50 | fl6.flowi6_mark = flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0; |
51 | if ((flags & XT_RPFILTER_LOOSE) == 0) { | ||
52 | fl6.flowi6_oif = dev->ifindex; | ||
53 | lookup_flags |= RT6_LOOKUP_F_IFACE; | ||
54 | } | ||
55 | 51 | ||
56 | rt = (void *) ip6_route_lookup(net, &fl6, lookup_flags); | 52 | rt = (void *) ip6_route_lookup(net, &fl6, lookup_flags); |
57 | if (rt->dst.error) | 53 | if (rt->dst.error) |
diff --git a/net/ipv6/netfilter/nf_flow_table_ipv6.c b/net/ipv6/netfilter/nf_flow_table_ipv6.c index d346705d6ee6..207cb35569b1 100644 --- a/net/ipv6/netfilter/nf_flow_table_ipv6.c +++ b/net/ipv6/netfilter/nf_flow_table_ipv6.c | |||
@@ -178,7 +178,7 @@ static bool __nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu) | |||
178 | if (skb->len <= mtu) | 178 | if (skb->len <= mtu) |
179 | return false; | 179 | return false; |
180 | 180 | ||
181 | if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu)) | 181 | if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu)) |
182 | return false; | 182 | return false; |
183 | 183 | ||
184 | return true; | 184 | return true; |
diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c index bed57ee65f7b..6b7f075f811f 100644 --- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c | |||
@@ -99,6 +99,10 @@ static bool nf_nat_ipv6_manip_pkt(struct sk_buff *skb, | |||
99 | !l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv6, iphdroff, hdroff, | 99 | !l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv6, iphdroff, hdroff, |
100 | target, maniptype)) | 100 | target, maniptype)) |
101 | return false; | 101 | return false; |
102 | |||
103 | /* must reload, offset might have changed */ | ||
104 | ipv6h = (void *)skb->data + iphdroff; | ||
105 | |||
102 | manip_addr: | 106 | manip_addr: |
103 | if (maniptype == NF_NAT_MANIP_SRC) | 107 | if (maniptype == NF_NAT_MANIP_SRC) |
104 | ipv6h->saddr = target->src.u3.in6; | 108 | ipv6h->saddr = target->src.u3.in6; |
diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c index cc5174c7254c..62fc84d7bdff 100644 --- a/net/ipv6/netfilter/nft_fib_ipv6.c +++ b/net/ipv6/netfilter/nft_fib_ipv6.c | |||
@@ -180,7 +180,6 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs, | |||
180 | } | 180 | } |
181 | 181 | ||
182 | *dest = 0; | 182 | *dest = 0; |
183 | again: | ||
184 | rt = (void *)ip6_route_lookup(nft_net(pkt), &fl6, lookup_flags); | 183 | rt = (void *)ip6_route_lookup(nft_net(pkt), &fl6, lookup_flags); |
185 | if (rt->dst.error) | 184 | if (rt->dst.error) |
186 | goto put_rt_err; | 185 | goto put_rt_err; |
@@ -189,15 +188,8 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs, | |||
189 | if (rt->rt6i_flags & (RTF_REJECT | RTF_ANYCAST | RTF_LOCAL)) | 188 | if (rt->rt6i_flags & (RTF_REJECT | RTF_ANYCAST | RTF_LOCAL)) |
190 | goto put_rt_err; | 189 | goto put_rt_err; |
191 | 190 | ||
192 | if (oif && oif != rt->rt6i_idev->dev) { | 191 | if (oif && oif != rt->rt6i_idev->dev) |
193 | /* multipath route? Try again with F_IFACE */ | 192 | goto put_rt_err; |
194 | if ((lookup_flags & RT6_LOOKUP_F_IFACE) == 0) { | ||
195 | lookup_flags |= RT6_LOOKUP_F_IFACE; | ||
196 | fl6.flowi6_oif = oif->ifindex; | ||
197 | ip6_rt_put(rt); | ||
198 | goto again; | ||
199 | } | ||
200 | } | ||
201 | 193 | ||
202 | switch (priv->result) { | 194 | switch (priv->result) { |
203 | case NFT_FIB_RESULT_OIF: | 195 | case NFT_FIB_RESULT_OIF: |
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 3a1775a62973..0195598f7bb5 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -1578,6 +1578,13 @@ static int ipip6_newlink(struct net *src_net, struct net_device *dev, | |||
1578 | if (err < 0) | 1578 | if (err < 0) |
1579 | return err; | 1579 | return err; |
1580 | 1580 | ||
1581 | if (tb[IFLA_MTU]) { | ||
1582 | u32 mtu = nla_get_u32(tb[IFLA_MTU]); | ||
1583 | |||
1584 | if (mtu >= IPV6_MIN_MTU && mtu <= 0xFFF8 - dev->hard_header_len) | ||
1585 | dev->mtu = mtu; | ||
1586 | } | ||
1587 | |||
1581 | #ifdef CONFIG_IPV6_SIT_6RD | 1588 | #ifdef CONFIG_IPV6_SIT_6RD |
1582 | if (ipip6_netlink_6rd_parms(data, &ip6rd)) | 1589 | if (ipip6_netlink_6rd_parms(data, &ip6rd)) |
1583 | err = ipip6_tunnel_update_6rd(nt, &ip6rd); | 1590 | err = ipip6_tunnel_update_6rd(nt, &ip6rd); |
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index 8ae87d4ec5ff..5959ce9620eb 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c | |||
@@ -82,7 +82,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb) | |||
82 | 82 | ||
83 | if ((!skb_is_gso(skb) && skb->len > mtu) || | 83 | if ((!skb_is_gso(skb) && skb->len > mtu) || |
84 | (skb_is_gso(skb) && | 84 | (skb_is_gso(skb) && |
85 | skb_gso_network_seglen(skb) > ip6_skb_dst_mtu(skb))) { | 85 | !skb_gso_validate_network_len(skb, ip6_skb_dst_mtu(skb)))) { |
86 | skb->dev = dst->dev; | 86 | skb->dev = dst->dev; |
87 | skb->protocol = htons(ETH_P_IPV6); | 87 | skb->protocol = htons(ETH_P_IPV6); |
88 | 88 | ||
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 194a7483bb93..83421c6f0bef 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c | |||
@@ -136,51 +136,6 @@ l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id) | |||
136 | 136 | ||
137 | } | 137 | } |
138 | 138 | ||
139 | /* Lookup the tunnel socket, possibly involving the fs code if the socket is | ||
140 | * owned by userspace. A struct sock returned from this function must be | ||
141 | * released using l2tp_tunnel_sock_put once you're done with it. | ||
142 | */ | ||
143 | static struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel) | ||
144 | { | ||
145 | int err = 0; | ||
146 | struct socket *sock = NULL; | ||
147 | struct sock *sk = NULL; | ||
148 | |||
149 | if (!tunnel) | ||
150 | goto out; | ||
151 | |||
152 | if (tunnel->fd >= 0) { | ||
153 | /* Socket is owned by userspace, who might be in the process | ||
154 | * of closing it. Look the socket up using the fd to ensure | ||
155 | * consistency. | ||
156 | */ | ||
157 | sock = sockfd_lookup(tunnel->fd, &err); | ||
158 | if (sock) | ||
159 | sk = sock->sk; | ||
160 | } else { | ||
161 | /* Socket is owned by kernelspace */ | ||
162 | sk = tunnel->sock; | ||
163 | sock_hold(sk); | ||
164 | } | ||
165 | |||
166 | out: | ||
167 | return sk; | ||
168 | } | ||
169 | |||
170 | /* Drop a reference to a tunnel socket obtained via. l2tp_tunnel_sock_put */ | ||
171 | static void l2tp_tunnel_sock_put(struct sock *sk) | ||
172 | { | ||
173 | struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); | ||
174 | if (tunnel) { | ||
175 | if (tunnel->fd >= 0) { | ||
176 | /* Socket is owned by userspace */ | ||
177 | sockfd_put(sk->sk_socket); | ||
178 | } | ||
179 | sock_put(sk); | ||
180 | } | ||
181 | sock_put(sk); | ||
182 | } | ||
183 | |||
184 | /* Session hash list. | 139 | /* Session hash list. |
185 | * The session_id SHOULD be random according to RFC2661, but several | 140 | * The session_id SHOULD be random according to RFC2661, but several |
186 | * L2TP implementations (Cisco and Microsoft) use incrementing | 141 | * L2TP implementations (Cisco and Microsoft) use incrementing |
@@ -193,6 +148,13 @@ l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id) | |||
193 | return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)]; | 148 | return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)]; |
194 | } | 149 | } |
195 | 150 | ||
151 | void l2tp_tunnel_free(struct l2tp_tunnel *tunnel) | ||
152 | { | ||
153 | sock_put(tunnel->sock); | ||
154 | /* the tunnel is freed in the socket destructor */ | ||
155 | } | ||
156 | EXPORT_SYMBOL(l2tp_tunnel_free); | ||
157 | |||
196 | /* Lookup a tunnel. A new reference is held on the returned tunnel. */ | 158 | /* Lookup a tunnel. A new reference is held on the returned tunnel. */ |
197 | struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id) | 159 | struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id) |
198 | { | 160 | { |
@@ -345,13 +307,11 @@ int l2tp_session_register(struct l2tp_session *session, | |||
345 | } | 307 | } |
346 | 308 | ||
347 | l2tp_tunnel_inc_refcount(tunnel); | 309 | l2tp_tunnel_inc_refcount(tunnel); |
348 | sock_hold(tunnel->sock); | ||
349 | hlist_add_head_rcu(&session->global_hlist, g_head); | 310 | hlist_add_head_rcu(&session->global_hlist, g_head); |
350 | 311 | ||
351 | spin_unlock_bh(&pn->l2tp_session_hlist_lock); | 312 | spin_unlock_bh(&pn->l2tp_session_hlist_lock); |
352 | } else { | 313 | } else { |
353 | l2tp_tunnel_inc_refcount(tunnel); | 314 | l2tp_tunnel_inc_refcount(tunnel); |
354 | sock_hold(tunnel->sock); | ||
355 | } | 315 | } |
356 | 316 | ||
357 | hlist_add_head(&session->hlist, head); | 317 | hlist_add_head(&session->hlist, head); |
@@ -969,7 +929,7 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) | |||
969 | { | 929 | { |
970 | struct l2tp_tunnel *tunnel; | 930 | struct l2tp_tunnel *tunnel; |
971 | 931 | ||
972 | tunnel = l2tp_sock_to_tunnel(sk); | 932 | tunnel = l2tp_tunnel(sk); |
973 | if (tunnel == NULL) | 933 | if (tunnel == NULL) |
974 | goto pass_up; | 934 | goto pass_up; |
975 | 935 | ||
@@ -977,13 +937,10 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) | |||
977 | tunnel->name, skb->len); | 937 | tunnel->name, skb->len); |
978 | 938 | ||
979 | if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook)) | 939 | if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook)) |
980 | goto pass_up_put; | 940 | goto pass_up; |
981 | 941 | ||
982 | sock_put(sk); | ||
983 | return 0; | 942 | return 0; |
984 | 943 | ||
985 | pass_up_put: | ||
986 | sock_put(sk); | ||
987 | pass_up: | 944 | pass_up: |
988 | return 1; | 945 | return 1; |
989 | } | 946 | } |
@@ -1207,14 +1164,12 @@ EXPORT_SYMBOL_GPL(l2tp_xmit_skb); | |||
1207 | static void l2tp_tunnel_destruct(struct sock *sk) | 1164 | static void l2tp_tunnel_destruct(struct sock *sk) |
1208 | { | 1165 | { |
1209 | struct l2tp_tunnel *tunnel = l2tp_tunnel(sk); | 1166 | struct l2tp_tunnel *tunnel = l2tp_tunnel(sk); |
1210 | struct l2tp_net *pn; | ||
1211 | 1167 | ||
1212 | if (tunnel == NULL) | 1168 | if (tunnel == NULL) |
1213 | goto end; | 1169 | goto end; |
1214 | 1170 | ||
1215 | l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name); | 1171 | l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name); |
1216 | 1172 | ||
1217 | |||
1218 | /* Disable udp encapsulation */ | 1173 | /* Disable udp encapsulation */ |
1219 | switch (tunnel->encap) { | 1174 | switch (tunnel->encap) { |
1220 | case L2TP_ENCAPTYPE_UDP: | 1175 | case L2TP_ENCAPTYPE_UDP: |
@@ -1231,18 +1186,11 @@ static void l2tp_tunnel_destruct(struct sock *sk) | |||
1231 | sk->sk_destruct = tunnel->old_sk_destruct; | 1186 | sk->sk_destruct = tunnel->old_sk_destruct; |
1232 | sk->sk_user_data = NULL; | 1187 | sk->sk_user_data = NULL; |
1233 | 1188 | ||
1234 | /* Remove the tunnel struct from the tunnel list */ | ||
1235 | pn = l2tp_pernet(tunnel->l2tp_net); | ||
1236 | spin_lock_bh(&pn->l2tp_tunnel_list_lock); | ||
1237 | list_del_rcu(&tunnel->list); | ||
1238 | spin_unlock_bh(&pn->l2tp_tunnel_list_lock); | ||
1239 | |||
1240 | tunnel->sock = NULL; | ||
1241 | l2tp_tunnel_dec_refcount(tunnel); | ||
1242 | |||
1243 | /* Call the original destructor */ | 1189 | /* Call the original destructor */ |
1244 | if (sk->sk_destruct) | 1190 | if (sk->sk_destruct) |
1245 | (*sk->sk_destruct)(sk); | 1191 | (*sk->sk_destruct)(sk); |
1192 | |||
1193 | kfree_rcu(tunnel, rcu); | ||
1246 | end: | 1194 | end: |
1247 | return; | 1195 | return; |
1248 | } | 1196 | } |
@@ -1303,49 +1251,43 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall); | |||
1303 | /* Tunnel socket destroy hook for UDP encapsulation */ | 1251 | /* Tunnel socket destroy hook for UDP encapsulation */ |
1304 | static void l2tp_udp_encap_destroy(struct sock *sk) | 1252 | static void l2tp_udp_encap_destroy(struct sock *sk) |
1305 | { | 1253 | { |
1306 | struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); | 1254 | struct l2tp_tunnel *tunnel = l2tp_tunnel(sk); |
1307 | if (tunnel) { | 1255 | |
1308 | l2tp_tunnel_closeall(tunnel); | 1256 | if (tunnel) |
1309 | sock_put(sk); | 1257 | l2tp_tunnel_delete(tunnel); |
1310 | } | ||
1311 | } | 1258 | } |
1312 | 1259 | ||
1313 | /* Workqueue tunnel deletion function */ | 1260 | /* Workqueue tunnel deletion function */ |
1314 | static void l2tp_tunnel_del_work(struct work_struct *work) | 1261 | static void l2tp_tunnel_del_work(struct work_struct *work) |
1315 | { | 1262 | { |
1316 | struct l2tp_tunnel *tunnel = NULL; | 1263 | struct l2tp_tunnel *tunnel = container_of(work, struct l2tp_tunnel, |
1317 | struct socket *sock = NULL; | 1264 | del_work); |
1318 | struct sock *sk = NULL; | 1265 | struct sock *sk = tunnel->sock; |
1319 | 1266 | struct socket *sock = sk->sk_socket; | |
1320 | tunnel = container_of(work, struct l2tp_tunnel, del_work); | 1267 | struct l2tp_net *pn; |
1321 | 1268 | ||
1322 | l2tp_tunnel_closeall(tunnel); | 1269 | l2tp_tunnel_closeall(tunnel); |
1323 | 1270 | ||
1324 | sk = l2tp_tunnel_sock_lookup(tunnel); | 1271 | /* If the tunnel socket was created within the kernel, use |
1325 | if (!sk) | ||
1326 | goto out; | ||
1327 | |||
1328 | sock = sk->sk_socket; | ||
1329 | |||
1330 | /* If the tunnel socket was created by userspace, then go through the | ||
1331 | * inet layer to shut the socket down, and let userspace close it. | ||
1332 | * Otherwise, if we created the socket directly within the kernel, use | ||
1333 | * the sk API to release it here. | 1272 | * the sk API to release it here. |
1334 | * In either case the tunnel resources are freed in the socket | ||
1335 | * destructor when the tunnel socket goes away. | ||
1336 | */ | 1273 | */ |
1337 | if (tunnel->fd >= 0) { | 1274 | if (tunnel->fd < 0) { |
1338 | if (sock) | ||
1339 | inet_shutdown(sock, 2); | ||
1340 | } else { | ||
1341 | if (sock) { | 1275 | if (sock) { |
1342 | kernel_sock_shutdown(sock, SHUT_RDWR); | 1276 | kernel_sock_shutdown(sock, SHUT_RDWR); |
1343 | sock_release(sock); | 1277 | sock_release(sock); |
1344 | } | 1278 | } |
1345 | } | 1279 | } |
1346 | 1280 | ||
1347 | l2tp_tunnel_sock_put(sk); | 1281 | /* Remove the tunnel struct from the tunnel list */ |
1348 | out: | 1282 | pn = l2tp_pernet(tunnel->l2tp_net); |
1283 | spin_lock_bh(&pn->l2tp_tunnel_list_lock); | ||
1284 | list_del_rcu(&tunnel->list); | ||
1285 | spin_unlock_bh(&pn->l2tp_tunnel_list_lock); | ||
1286 | |||
1287 | /* drop initial ref */ | ||
1288 | l2tp_tunnel_dec_refcount(tunnel); | ||
1289 | |||
1290 | /* drop workqueue ref */ | ||
1349 | l2tp_tunnel_dec_refcount(tunnel); | 1291 | l2tp_tunnel_dec_refcount(tunnel); |
1350 | } | 1292 | } |
1351 | 1293 | ||
@@ -1598,13 +1540,22 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 | |||
1598 | sk->sk_user_data = tunnel; | 1540 | sk->sk_user_data = tunnel; |
1599 | } | 1541 | } |
1600 | 1542 | ||
1543 | /* Bump the reference count. The tunnel context is deleted | ||
1544 | * only when this drops to zero. A reference is also held on | ||
1545 | * the tunnel socket to ensure that it is not released while | ||
1546 | * the tunnel is extant. Must be done before sk_destruct is | ||
1547 | * set. | ||
1548 | */ | ||
1549 | refcount_set(&tunnel->ref_count, 1); | ||
1550 | sock_hold(sk); | ||
1551 | tunnel->sock = sk; | ||
1552 | tunnel->fd = fd; | ||
1553 | |||
1601 | /* Hook on the tunnel socket destructor so that we can cleanup | 1554 | /* Hook on the tunnel socket destructor so that we can cleanup |
1602 | * if the tunnel socket goes away. | 1555 | * if the tunnel socket goes away. |
1603 | */ | 1556 | */ |
1604 | tunnel->old_sk_destruct = sk->sk_destruct; | 1557 | tunnel->old_sk_destruct = sk->sk_destruct; |
1605 | sk->sk_destruct = &l2tp_tunnel_destruct; | 1558 | sk->sk_destruct = &l2tp_tunnel_destruct; |
1606 | tunnel->sock = sk; | ||
1607 | tunnel->fd = fd; | ||
1608 | lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock"); | 1559 | lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock"); |
1609 | 1560 | ||
1610 | sk->sk_allocation = GFP_ATOMIC; | 1561 | sk->sk_allocation = GFP_ATOMIC; |
@@ -1614,11 +1565,6 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 | |||
1614 | 1565 | ||
1615 | /* Add tunnel to our list */ | 1566 | /* Add tunnel to our list */ |
1616 | INIT_LIST_HEAD(&tunnel->list); | 1567 | INIT_LIST_HEAD(&tunnel->list); |
1617 | |||
1618 | /* Bump the reference count. The tunnel context is deleted | ||
1619 | * only when this drops to zero. Must be done before list insertion | ||
1620 | */ | ||
1621 | refcount_set(&tunnel->ref_count, 1); | ||
1622 | spin_lock_bh(&pn->l2tp_tunnel_list_lock); | 1568 | spin_lock_bh(&pn->l2tp_tunnel_list_lock); |
1623 | list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list); | 1569 | list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list); |
1624 | spin_unlock_bh(&pn->l2tp_tunnel_list_lock); | 1570 | spin_unlock_bh(&pn->l2tp_tunnel_list_lock); |
@@ -1659,8 +1605,6 @@ void l2tp_session_free(struct l2tp_session *session) | |||
1659 | 1605 | ||
1660 | if (tunnel) { | 1606 | if (tunnel) { |
1661 | BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC); | 1607 | BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC); |
1662 | sock_put(tunnel->sock); | ||
1663 | session->tunnel = NULL; | ||
1664 | l2tp_tunnel_dec_refcount(tunnel); | 1608 | l2tp_tunnel_dec_refcount(tunnel); |
1665 | } | 1609 | } |
1666 | 1610 | ||
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 9bbee90e9963..a1aa9550f04e 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h | |||
@@ -214,27 +214,8 @@ static inline void *l2tp_session_priv(struct l2tp_session *session) | |||
214 | return &session->priv[0]; | 214 | return &session->priv[0]; |
215 | } | 215 | } |
216 | 216 | ||
217 | static inline struct l2tp_tunnel *l2tp_sock_to_tunnel(struct sock *sk) | ||
218 | { | ||
219 | struct l2tp_tunnel *tunnel; | ||
220 | |||
221 | if (sk == NULL) | ||
222 | return NULL; | ||
223 | |||
224 | sock_hold(sk); | ||
225 | tunnel = (struct l2tp_tunnel *)(sk->sk_user_data); | ||
226 | if (tunnel == NULL) { | ||
227 | sock_put(sk); | ||
228 | goto out; | ||
229 | } | ||
230 | |||
231 | BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC); | ||
232 | |||
233 | out: | ||
234 | return tunnel; | ||
235 | } | ||
236 | |||
237 | struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id); | 217 | struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id); |
218 | void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); | ||
238 | 219 | ||
239 | struct l2tp_session *l2tp_session_get(const struct net *net, | 220 | struct l2tp_session *l2tp_session_get(const struct net *net, |
240 | struct l2tp_tunnel *tunnel, | 221 | struct l2tp_tunnel *tunnel, |
@@ -283,7 +264,7 @@ static inline void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel) | |||
283 | static inline void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel) | 264 | static inline void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel) |
284 | { | 265 | { |
285 | if (refcount_dec_and_test(&tunnel->ref_count)) | 266 | if (refcount_dec_and_test(&tunnel->ref_count)) |
286 | kfree_rcu(tunnel, rcu); | 267 | l2tp_tunnel_free(tunnel); |
287 | } | 268 | } |
288 | 269 | ||
289 | /* Session reference counts. Incremented when code obtains a reference | 270 | /* Session reference counts. Incremented when code obtains a reference |
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index ff61124fdf59..3428fba6f2b7 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c | |||
@@ -234,17 +234,13 @@ static void l2tp_ip_close(struct sock *sk, long timeout) | |||
234 | static void l2tp_ip_destroy_sock(struct sock *sk) | 234 | static void l2tp_ip_destroy_sock(struct sock *sk) |
235 | { | 235 | { |
236 | struct sk_buff *skb; | 236 | struct sk_buff *skb; |
237 | struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); | 237 | struct l2tp_tunnel *tunnel = sk->sk_user_data; |
238 | 238 | ||
239 | while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) | 239 | while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) |
240 | kfree_skb(skb); | 240 | kfree_skb(skb); |
241 | 241 | ||
242 | if (tunnel) { | 242 | if (tunnel) |
243 | l2tp_tunnel_closeall(tunnel); | 243 | l2tp_tunnel_delete(tunnel); |
244 | sock_put(sk); | ||
245 | } | ||
246 | |||
247 | sk_refcnt_debug_dec(sk); | ||
248 | } | 244 | } |
249 | 245 | ||
250 | static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) | 246 | static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 192344688c06..6f009eaa5fbe 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c | |||
@@ -248,16 +248,14 @@ static void l2tp_ip6_close(struct sock *sk, long timeout) | |||
248 | 248 | ||
249 | static void l2tp_ip6_destroy_sock(struct sock *sk) | 249 | static void l2tp_ip6_destroy_sock(struct sock *sk) |
250 | { | 250 | { |
251 | struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); | 251 | struct l2tp_tunnel *tunnel = sk->sk_user_data; |
252 | 252 | ||
253 | lock_sock(sk); | 253 | lock_sock(sk); |
254 | ip6_flush_pending_frames(sk); | 254 | ip6_flush_pending_frames(sk); |
255 | release_sock(sk); | 255 | release_sock(sk); |
256 | 256 | ||
257 | if (tunnel) { | 257 | if (tunnel) |
258 | l2tp_tunnel_closeall(tunnel); | 258 | l2tp_tunnel_delete(tunnel); |
259 | sock_put(sk); | ||
260 | } | ||
261 | 259 | ||
262 | inet6_destroy_sock(sk); | 260 | inet6_destroy_sock(sk); |
263 | } | 261 | } |
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 59f246d7b290..3b02f24ea9ec 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c | |||
@@ -416,20 +416,28 @@ abort: | |||
416 | * Session (and tunnel control) socket create/destroy. | 416 | * Session (and tunnel control) socket create/destroy. |
417 | *****************************************************************************/ | 417 | *****************************************************************************/ |
418 | 418 | ||
419 | static void pppol2tp_put_sk(struct rcu_head *head) | ||
420 | { | ||
421 | struct pppol2tp_session *ps; | ||
422 | |||
423 | ps = container_of(head, typeof(*ps), rcu); | ||
424 | sock_put(ps->__sk); | ||
425 | } | ||
426 | |||
419 | /* Called by l2tp_core when a session socket is being closed. | 427 | /* Called by l2tp_core when a session socket is being closed. |
420 | */ | 428 | */ |
421 | static void pppol2tp_session_close(struct l2tp_session *session) | 429 | static void pppol2tp_session_close(struct l2tp_session *session) |
422 | { | 430 | { |
423 | struct sock *sk; | 431 | struct pppol2tp_session *ps; |
424 | |||
425 | BUG_ON(session->magic != L2TP_SESSION_MAGIC); | ||
426 | 432 | ||
427 | sk = pppol2tp_session_get_sock(session); | 433 | ps = l2tp_session_priv(session); |
428 | if (sk) { | 434 | mutex_lock(&ps->sk_lock); |
429 | if (sk->sk_socket) | 435 | ps->__sk = rcu_dereference_protected(ps->sk, |
430 | inet_shutdown(sk->sk_socket, SEND_SHUTDOWN); | 436 | lockdep_is_held(&ps->sk_lock)); |
431 | sock_put(sk); | 437 | RCU_INIT_POINTER(ps->sk, NULL); |
432 | } | 438 | if (ps->__sk) |
439 | call_rcu(&ps->rcu, pppol2tp_put_sk); | ||
440 | mutex_unlock(&ps->sk_lock); | ||
433 | } | 441 | } |
434 | 442 | ||
435 | /* Really kill the session socket. (Called from sock_put() if | 443 | /* Really kill the session socket. (Called from sock_put() if |
@@ -449,14 +457,6 @@ static void pppol2tp_session_destruct(struct sock *sk) | |||
449 | } | 457 | } |
450 | } | 458 | } |
451 | 459 | ||
452 | static void pppol2tp_put_sk(struct rcu_head *head) | ||
453 | { | ||
454 | struct pppol2tp_session *ps; | ||
455 | |||
456 | ps = container_of(head, typeof(*ps), rcu); | ||
457 | sock_put(ps->__sk); | ||
458 | } | ||
459 | |||
460 | /* Called when the PPPoX socket (session) is closed. | 460 | /* Called when the PPPoX socket (session) is closed. |
461 | */ | 461 | */ |
462 | static int pppol2tp_release(struct socket *sock) | 462 | static int pppol2tp_release(struct socket *sock) |
@@ -480,26 +480,17 @@ static int pppol2tp_release(struct socket *sock) | |||
480 | sock_orphan(sk); | 480 | sock_orphan(sk); |
481 | sock->sk = NULL; | 481 | sock->sk = NULL; |
482 | 482 | ||
483 | /* If the socket is associated with a session, | ||
484 | * l2tp_session_delete will call pppol2tp_session_close which | ||
485 | * will drop the session's ref on the socket. | ||
486 | */ | ||
483 | session = pppol2tp_sock_to_session(sk); | 487 | session = pppol2tp_sock_to_session(sk); |
484 | 488 | if (session) { | |
485 | if (session != NULL) { | ||
486 | struct pppol2tp_session *ps; | ||
487 | |||
488 | l2tp_session_delete(session); | 489 | l2tp_session_delete(session); |
489 | 490 | /* drop the ref obtained by pppol2tp_sock_to_session */ | |
490 | ps = l2tp_session_priv(session); | 491 | sock_put(sk); |
491 | mutex_lock(&ps->sk_lock); | ||
492 | ps->__sk = rcu_dereference_protected(ps->sk, | ||
493 | lockdep_is_held(&ps->sk_lock)); | ||
494 | RCU_INIT_POINTER(ps->sk, NULL); | ||
495 | mutex_unlock(&ps->sk_lock); | ||
496 | call_rcu(&ps->rcu, pppol2tp_put_sk); | ||
497 | |||
498 | /* Rely on the sock_put() call at the end of the function for | ||
499 | * dropping the reference held by pppol2tp_sock_to_session(). | ||
500 | * The last reference will be dropped by pppol2tp_put_sk(). | ||
501 | */ | ||
502 | } | 492 | } |
493 | |||
503 | release_sock(sk); | 494 | release_sock(sk); |
504 | 495 | ||
505 | /* This will delete the session context via | 496 | /* This will delete the session context via |
@@ -796,6 +787,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, | |||
796 | 787 | ||
797 | out_no_ppp: | 788 | out_no_ppp: |
798 | /* This is how we get the session context from the socket. */ | 789 | /* This is how we get the session context from the socket. */ |
790 | sock_hold(sk); | ||
799 | sk->sk_user_data = session; | 791 | sk->sk_user_data = session; |
800 | rcu_assign_pointer(ps->sk, sk); | 792 | rcu_assign_pointer(ps->sk, sk); |
801 | mutex_unlock(&ps->sk_lock); | 793 | mutex_unlock(&ps->sk_lock); |
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index fd580614085b..56fe16b07538 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -3921,7 +3921,7 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx, | |||
3921 | if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS | | 3921 | if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS | |
3922 | IEEE80211_FCTL_TODS)) != | 3922 | IEEE80211_FCTL_TODS)) != |
3923 | fast_rx->expected_ds_bits) | 3923 | fast_rx->expected_ds_bits) |
3924 | goto drop; | 3924 | return false; |
3925 | 3925 | ||
3926 | /* assign the key to drop unencrypted frames (later) | 3926 | /* assign the key to drop unencrypted frames (later) |
3927 | * and strip the IV/MIC if necessary | 3927 | * and strip the IV/MIC if necessary |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 25904af38839..69722504e3e1 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -3574,6 +3574,14 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
3574 | if (!IS_ERR_OR_NULL(sta)) { | 3574 | if (!IS_ERR_OR_NULL(sta)) { |
3575 | struct ieee80211_fast_tx *fast_tx; | 3575 | struct ieee80211_fast_tx *fast_tx; |
3576 | 3576 | ||
3577 | /* We need a bit of data queued to build aggregates properly, so | ||
3578 | * instruct the TCP stack to allow more than a single ms of data | ||
3579 | * to be queued in the stack. The value is a bit-shift of 1 | ||
3580 | * second, so 8 is ~4ms of queued data. Only affects local TCP | ||
3581 | * sockets. | ||
3582 | */ | ||
3583 | sk_pacing_shift_update(skb->sk, 8); | ||
3584 | |||
3577 | fast_tx = rcu_dereference(sta->fast_tx); | 3585 | fast_tx = rcu_dereference(sta->fast_tx); |
3578 | 3586 | ||
3579 | if (fast_tx && | 3587 | if (fast_tx && |
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index e545a3c9365f..7a4de6d618b1 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c | |||
@@ -122,7 +122,7 @@ bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu) | |||
122 | if (skb->len <= mtu) | 122 | if (skb->len <= mtu) |
123 | return false; | 123 | return false; |
124 | 124 | ||
125 | if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu)) | 125 | if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu)) |
126 | return false; | 126 | return false; |
127 | 127 | ||
128 | return true; | 128 | return true; |
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c index 3e17d32b629d..58d5d05aec24 100644 --- a/net/netfilter/ipvs/ip_vs_ftp.c +++ b/net/netfilter/ipvs/ip_vs_ftp.c | |||
@@ -260,7 +260,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, | |||
260 | buf_len = strlen(buf); | 260 | buf_len = strlen(buf); |
261 | 261 | ||
262 | ct = nf_ct_get(skb, &ctinfo); | 262 | ct = nf_ct_get(skb, &ctinfo); |
263 | if (ct && (ct->status & IPS_NAT_MASK)) { | 263 | if (ct) { |
264 | bool mangled; | 264 | bool mangled; |
265 | 265 | ||
266 | /* If mangling fails this function will return 0 | 266 | /* If mangling fails this function will return 0 |
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 8b9fe30de0cd..558593e6a0a3 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
@@ -5037,9 +5037,9 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk, | |||
5037 | { | 5037 | { |
5038 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); | 5038 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); |
5039 | const struct nf_flowtable_type *type; | 5039 | const struct nf_flowtable_type *type; |
5040 | struct nft_flowtable *flowtable, *ft; | ||
5040 | u8 genmask = nft_genmask_next(net); | 5041 | u8 genmask = nft_genmask_next(net); |
5041 | int family = nfmsg->nfgen_family; | 5042 | int family = nfmsg->nfgen_family; |
5042 | struct nft_flowtable *flowtable; | ||
5043 | struct nft_table *table; | 5043 | struct nft_table *table; |
5044 | struct nft_ctx ctx; | 5044 | struct nft_ctx ctx; |
5045 | int err, i, k; | 5045 | int err, i, k; |
@@ -5099,6 +5099,22 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk, | |||
5099 | goto err3; | 5099 | goto err3; |
5100 | 5100 | ||
5101 | for (i = 0; i < flowtable->ops_len; i++) { | 5101 | for (i = 0; i < flowtable->ops_len; i++) { |
5102 | if (!flowtable->ops[i].dev) | ||
5103 | continue; | ||
5104 | |||
5105 | list_for_each_entry(ft, &table->flowtables, list) { | ||
5106 | for (k = 0; k < ft->ops_len; k++) { | ||
5107 | if (!ft->ops[k].dev) | ||
5108 | continue; | ||
5109 | |||
5110 | if (flowtable->ops[i].dev == ft->ops[k].dev && | ||
5111 | flowtable->ops[i].pf == ft->ops[k].pf) { | ||
5112 | err = -EBUSY; | ||
5113 | goto err4; | ||
5114 | } | ||
5115 | } | ||
5116 | } | ||
5117 | |||
5102 | err = nf_register_net_hook(net, &flowtable->ops[i]); | 5118 | err = nf_register_net_hook(net, &flowtable->ops[i]); |
5103 | if (err < 0) | 5119 | if (err < 0) |
5104 | goto err4; | 5120 | goto err4; |
@@ -5120,7 +5136,7 @@ err5: | |||
5120 | i = flowtable->ops_len; | 5136 | i = flowtable->ops_len; |
5121 | err4: | 5137 | err4: |
5122 | for (k = i - 1; k >= 0; k--) | 5138 | for (k = i - 1; k >= 0; k--) |
5123 | nf_unregister_net_hook(net, &flowtable->ops[i]); | 5139 | nf_unregister_net_hook(net, &flowtable->ops[k]); |
5124 | 5140 | ||
5125 | kfree(flowtable->ops); | 5141 | kfree(flowtable->ops); |
5126 | err3: | 5142 | err3: |
@@ -5145,6 +5161,11 @@ static int nf_tables_delflowtable(struct net *net, struct sock *nlsk, | |||
5145 | struct nft_table *table; | 5161 | struct nft_table *table; |
5146 | struct nft_ctx ctx; | 5162 | struct nft_ctx ctx; |
5147 | 5163 | ||
5164 | if (!nla[NFTA_FLOWTABLE_TABLE] || | ||
5165 | (!nla[NFTA_FLOWTABLE_NAME] && | ||
5166 | !nla[NFTA_FLOWTABLE_HANDLE])) | ||
5167 | return -EINVAL; | ||
5168 | |||
5148 | table = nf_tables_table_lookup(net, nla[NFTA_FLOWTABLE_TABLE], | 5169 | table = nf_tables_table_lookup(net, nla[NFTA_FLOWTABLE_TABLE], |
5149 | family, genmask); | 5170 | family, genmask); |
5150 | if (IS_ERR(table)) | 5171 | if (IS_ERR(table)) |
diff --git a/net/qrtr/smd.c b/net/qrtr/smd.c index 50615d5efac1..9cf089b9754e 100644 --- a/net/qrtr/smd.c +++ b/net/qrtr/smd.c | |||
@@ -114,5 +114,6 @@ static struct rpmsg_driver qcom_smd_qrtr_driver = { | |||
114 | 114 | ||
115 | module_rpmsg_driver(qcom_smd_qrtr_driver); | 115 | module_rpmsg_driver(qcom_smd_qrtr_driver); |
116 | 116 | ||
117 | MODULE_ALIAS("rpmsg:IPCRTR"); | ||
117 | MODULE_DESCRIPTION("Qualcomm IPC-Router SMD interface driver"); | 118 | MODULE_DESCRIPTION("Qualcomm IPC-Router SMD interface driver"); |
118 | MODULE_LICENSE("GPL v2"); | 119 | MODULE_LICENSE("GPL v2"); |
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c index c061d6eb465d..22571189f21e 100644 --- a/net/rds/tcp_listen.c +++ b/net/rds/tcp_listen.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2006 Oracle. All rights reserved. | 2 | * Copyright (c) 2006, 2018 Oracle. All rights reserved. |
3 | * | 3 | * |
4 | * This software is available to you under a choice of one of two | 4 | * This software is available to you under a choice of one of two |
5 | * licenses. You may choose to be licensed under the terms of the GNU | 5 | * licenses. You may choose to be licensed under the terms of the GNU |
@@ -142,12 +142,20 @@ int rds_tcp_accept_one(struct socket *sock) | |||
142 | if (ret) | 142 | if (ret) |
143 | goto out; | 143 | goto out; |
144 | 144 | ||
145 | new_sock->type = sock->type; | ||
146 | new_sock->ops = sock->ops; | ||
147 | ret = sock->ops->accept(sock, new_sock, O_NONBLOCK, true); | 145 | ret = sock->ops->accept(sock, new_sock, O_NONBLOCK, true); |
148 | if (ret < 0) | 146 | if (ret < 0) |
149 | goto out; | 147 | goto out; |
150 | 148 | ||
149 | /* sock_create_lite() does not get a hold on the owner module so we | ||
150 | * need to do it here. Note that sock_release() uses sock->ops to | ||
151 | * determine if it needs to decrement the reference count. So set | ||
152 | * sock->ops after calling accept() in case that fails. And there's | ||
153 | * no need to do try_module_get() as the listener should have a hold | ||
154 | * already. | ||
155 | */ | ||
156 | new_sock->ops = sock->ops; | ||
157 | __module_get(new_sock->ops->owner); | ||
158 | |||
151 | ret = rds_tcp_keepalive(new_sock); | 159 | ret = rds_tcp_keepalive(new_sock); |
152 | if (ret < 0) | 160 | if (ret < 0) |
153 | goto out; | 161 | goto out; |
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 229172d509cc..03225a8df973 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c | |||
@@ -188,7 +188,8 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch, | |||
188 | int ret; | 188 | int ret; |
189 | 189 | ||
190 | if (qdisc_pkt_len(skb) > q->max_size) { | 190 | if (qdisc_pkt_len(skb) > q->max_size) { |
191 | if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size) | 191 | if (skb_is_gso(skb) && |
192 | skb_gso_validate_mac_len(skb, q->max_size)) | ||
192 | return tbf_segment(skb, sch, to_free); | 193 | return tbf_segment(skb, sch, to_free); |
193 | return qdisc_drop(skb, sch, to_free); | 194 | return qdisc_drop(skb, sch, to_free); |
194 | } | 195 | } |
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index da1a5cdefd13..8cc97834d4f6 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c | |||
@@ -1406,8 +1406,10 @@ static int smc_create(struct net *net, struct socket *sock, int protocol, | |||
1406 | smc->use_fallback = false; /* assume rdma capability first */ | 1406 | smc->use_fallback = false; /* assume rdma capability first */ |
1407 | rc = sock_create_kern(net, PF_INET, SOCK_STREAM, | 1407 | rc = sock_create_kern(net, PF_INET, SOCK_STREAM, |
1408 | IPPROTO_TCP, &smc->clcsock); | 1408 | IPPROTO_TCP, &smc->clcsock); |
1409 | if (rc) | 1409 | if (rc) { |
1410 | sk_common_release(sk); | 1410 | sk_common_release(sk); |
1411 | goto out; | ||
1412 | } | ||
1411 | smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE); | 1413 | smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE); |
1412 | smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE); | 1414 | smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE); |
1413 | 1415 | ||
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c index 3cd086e5bd28..b42395d24cba 100644 --- a/net/smc/smc_cdc.c +++ b/net/smc/smc_cdc.c | |||
@@ -269,7 +269,7 @@ static void smc_cdc_rx_handler(struct ib_wc *wc, void *buf) | |||
269 | 269 | ||
270 | if (wc->byte_len < offsetof(struct smc_cdc_msg, reserved)) | 270 | if (wc->byte_len < offsetof(struct smc_cdc_msg, reserved)) |
271 | return; /* short message */ | 271 | return; /* short message */ |
272 | if (cdc->len != sizeof(*cdc)) | 272 | if (cdc->len != SMC_WR_TX_SIZE) |
273 | return; /* invalid message */ | 273 | return; /* invalid message */ |
274 | smc_cdc_msg_recv(cdc, link, wc->wr_id); | 274 | smc_cdc_msg_recv(cdc, link, wc->wr_id); |
275 | } | 275 | } |
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 2424c7100aaf..645dd226177b 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c | |||
@@ -177,6 +177,7 @@ static int smc_lgr_create(struct smc_sock *smc, __be32 peer_in_addr, | |||
177 | 177 | ||
178 | lnk = &lgr->lnk[SMC_SINGLE_LINK]; | 178 | lnk = &lgr->lnk[SMC_SINGLE_LINK]; |
179 | /* initialize link */ | 179 | /* initialize link */ |
180 | lnk->link_id = SMC_SINGLE_LINK; | ||
180 | lnk->smcibdev = smcibdev; | 181 | lnk->smcibdev = smcibdev; |
181 | lnk->ibport = ibport; | 182 | lnk->ibport = ibport; |
182 | lnk->path_mtu = smcibdev->pattr[ibport - 1].active_mtu; | 183 | lnk->path_mtu = smcibdev->pattr[ibport - 1].active_mtu; |
@@ -465,7 +466,7 @@ create: | |||
465 | rc = smc_link_determine_gid(conn->lgr); | 466 | rc = smc_link_determine_gid(conn->lgr); |
466 | } | 467 | } |
467 | conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE; | 468 | conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE; |
468 | conn->local_tx_ctrl.len = sizeof(struct smc_cdc_msg); | 469 | conn->local_tx_ctrl.len = SMC_WR_TX_SIZE; |
469 | #ifndef KERNEL_HAS_ATOMIC64 | 470 | #ifndef KERNEL_HAS_ATOMIC64 |
470 | spin_lock_init(&conn->acurs_lock); | 471 | spin_lock_init(&conn->acurs_lock); |
471 | #endif | 472 | #endif |
diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c index 92fe4cc8c82c..b4aa4fcedb96 100644 --- a/net/smc/smc_llc.c +++ b/net/smc/smc_llc.c | |||
@@ -92,7 +92,7 @@ int smc_llc_send_confirm_link(struct smc_link *link, u8 mac[], | |||
92 | memcpy(confllc->sender_mac, mac, ETH_ALEN); | 92 | memcpy(confllc->sender_mac, mac, ETH_ALEN); |
93 | memcpy(confllc->sender_gid, gid, SMC_GID_SIZE); | 93 | memcpy(confllc->sender_gid, gid, SMC_GID_SIZE); |
94 | hton24(confllc->sender_qp_num, link->roce_qp->qp_num); | 94 | hton24(confllc->sender_qp_num, link->roce_qp->qp_num); |
95 | /* confllc->link_num = SMC_SINGLE_LINK; already done by memset above */ | 95 | confllc->link_num = link->link_id; |
96 | memcpy(confllc->link_uid, lgr->id, SMC_LGR_ID_SIZE); | 96 | memcpy(confllc->link_uid, lgr->id, SMC_LGR_ID_SIZE); |
97 | confllc->max_links = SMC_LINKS_PER_LGR_MAX; | 97 | confllc->max_links = SMC_LINKS_PER_LGR_MAX; |
98 | /* send llc message */ | 98 | /* send llc message */ |
diff --git a/net/tipc/group.c b/net/tipc/group.c index 122162a31816..04e516d18054 100644 --- a/net/tipc/group.c +++ b/net/tipc/group.c | |||
@@ -189,6 +189,7 @@ struct tipc_group *tipc_group_create(struct net *net, u32 portid, | |||
189 | grp->loopback = mreq->flags & TIPC_GROUP_LOOPBACK; | 189 | grp->loopback = mreq->flags & TIPC_GROUP_LOOPBACK; |
190 | grp->events = mreq->flags & TIPC_GROUP_MEMBER_EVTS; | 190 | grp->events = mreq->flags & TIPC_GROUP_MEMBER_EVTS; |
191 | grp->open = group_is_open; | 191 | grp->open = group_is_open; |
192 | *grp->open = false; | ||
192 | filter |= global ? TIPC_SUB_CLUSTER_SCOPE : TIPC_SUB_NODE_SCOPE; | 193 | filter |= global ? TIPC_SUB_CLUSTER_SCOPE : TIPC_SUB_NODE_SCOPE; |
193 | if (tipc_topsrv_kern_subscr(net, portid, type, 0, ~0, | 194 | if (tipc_topsrv_kern_subscr(net, portid, type, 0, ~0, |
194 | filter, &grp->subid)) | 195 | filter, &grp->subid)) |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index b0323ec7971e..7dfa9fc99ec3 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -473,6 +473,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock, | |||
473 | sk->sk_write_space = tipc_write_space; | 473 | sk->sk_write_space = tipc_write_space; |
474 | sk->sk_destruct = tipc_sock_destruct; | 474 | sk->sk_destruct = tipc_sock_destruct; |
475 | tsk->conn_timeout = CONN_TIMEOUT_DEFAULT; | 475 | tsk->conn_timeout = CONN_TIMEOUT_DEFAULT; |
476 | tsk->group_is_open = true; | ||
476 | atomic_set(&tsk->dupl_rcvcnt, 0); | 477 | atomic_set(&tsk->dupl_rcvcnt, 0); |
477 | 478 | ||
478 | /* Start out with safe limits until we receive an advertised window */ | 479 | /* Start out with safe limits until we receive an advertised window */ |
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index e9b4b53ab53e..d824d548447e 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c | |||
@@ -46,16 +46,26 @@ MODULE_DESCRIPTION("Transport Layer Security Support"); | |||
46 | MODULE_LICENSE("Dual BSD/GPL"); | 46 | MODULE_LICENSE("Dual BSD/GPL"); |
47 | 47 | ||
48 | enum { | 48 | enum { |
49 | TLSV4, | ||
50 | TLSV6, | ||
51 | TLS_NUM_PROTS, | ||
52 | }; | ||
53 | |||
54 | enum { | ||
49 | TLS_BASE_TX, | 55 | TLS_BASE_TX, |
50 | TLS_SW_TX, | 56 | TLS_SW_TX, |
51 | TLS_NUM_CONFIG, | 57 | TLS_NUM_CONFIG, |
52 | }; | 58 | }; |
53 | 59 | ||
54 | static struct proto tls_prots[TLS_NUM_CONFIG]; | 60 | static struct proto *saved_tcpv6_prot; |
61 | static DEFINE_MUTEX(tcpv6_prot_mutex); | ||
62 | static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG]; | ||
55 | 63 | ||
56 | static inline void update_sk_prot(struct sock *sk, struct tls_context *ctx) | 64 | static inline void update_sk_prot(struct sock *sk, struct tls_context *ctx) |
57 | { | 65 | { |
58 | sk->sk_prot = &tls_prots[ctx->tx_conf]; | 66 | int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4; |
67 | |||
68 | sk->sk_prot = &tls_prots[ip_ver][ctx->tx_conf]; | ||
59 | } | 69 | } |
60 | 70 | ||
61 | int wait_on_pending_writer(struct sock *sk, long *timeo) | 71 | int wait_on_pending_writer(struct sock *sk, long *timeo) |
@@ -453,8 +463,21 @@ static int tls_setsockopt(struct sock *sk, int level, int optname, | |||
453 | return do_tls_setsockopt(sk, optname, optval, optlen); | 463 | return do_tls_setsockopt(sk, optname, optval, optlen); |
454 | } | 464 | } |
455 | 465 | ||
466 | static void build_protos(struct proto *prot, struct proto *base) | ||
467 | { | ||
468 | prot[TLS_BASE_TX] = *base; | ||
469 | prot[TLS_BASE_TX].setsockopt = tls_setsockopt; | ||
470 | prot[TLS_BASE_TX].getsockopt = tls_getsockopt; | ||
471 | prot[TLS_BASE_TX].close = tls_sk_proto_close; | ||
472 | |||
473 | prot[TLS_SW_TX] = prot[TLS_BASE_TX]; | ||
474 | prot[TLS_SW_TX].sendmsg = tls_sw_sendmsg; | ||
475 | prot[TLS_SW_TX].sendpage = tls_sw_sendpage; | ||
476 | } | ||
477 | |||
456 | static int tls_init(struct sock *sk) | 478 | static int tls_init(struct sock *sk) |
457 | { | 479 | { |
480 | int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4; | ||
458 | struct inet_connection_sock *icsk = inet_csk(sk); | 481 | struct inet_connection_sock *icsk = inet_csk(sk); |
459 | struct tls_context *ctx; | 482 | struct tls_context *ctx; |
460 | int rc = 0; | 483 | int rc = 0; |
@@ -479,6 +502,17 @@ static int tls_init(struct sock *sk) | |||
479 | ctx->getsockopt = sk->sk_prot->getsockopt; | 502 | ctx->getsockopt = sk->sk_prot->getsockopt; |
480 | ctx->sk_proto_close = sk->sk_prot->close; | 503 | ctx->sk_proto_close = sk->sk_prot->close; |
481 | 504 | ||
505 | /* Build IPv6 TLS whenever the address of tcpv6_prot changes */ | ||
506 | if (ip_ver == TLSV6 && | ||
507 | unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) { | ||
508 | mutex_lock(&tcpv6_prot_mutex); | ||
509 | if (likely(sk->sk_prot != saved_tcpv6_prot)) { | ||
510 | build_protos(tls_prots[TLSV6], sk->sk_prot); | ||
511 | smp_store_release(&saved_tcpv6_prot, sk->sk_prot); | ||
512 | } | ||
513 | mutex_unlock(&tcpv6_prot_mutex); | ||
514 | } | ||
515 | |||
482 | ctx->tx_conf = TLS_BASE_TX; | 516 | ctx->tx_conf = TLS_BASE_TX; |
483 | update_sk_prot(sk, ctx); | 517 | update_sk_prot(sk, ctx); |
484 | out: | 518 | out: |
@@ -493,21 +527,9 @@ static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = { | |||
493 | .init = tls_init, | 527 | .init = tls_init, |
494 | }; | 528 | }; |
495 | 529 | ||
496 | static void build_protos(struct proto *prot, struct proto *base) | ||
497 | { | ||
498 | prot[TLS_BASE_TX] = *base; | ||
499 | prot[TLS_BASE_TX].setsockopt = tls_setsockopt; | ||
500 | prot[TLS_BASE_TX].getsockopt = tls_getsockopt; | ||
501 | prot[TLS_BASE_TX].close = tls_sk_proto_close; | ||
502 | |||
503 | prot[TLS_SW_TX] = prot[TLS_BASE_TX]; | ||
504 | prot[TLS_SW_TX].sendmsg = tls_sw_sendmsg; | ||
505 | prot[TLS_SW_TX].sendpage = tls_sw_sendpage; | ||
506 | } | ||
507 | |||
508 | static int __init tls_register(void) | 530 | static int __init tls_register(void) |
509 | { | 531 | { |
510 | build_protos(tls_prots, &tcp_prot); | 532 | build_protos(tls_prots[TLSV4], &tcp_prot); |
511 | 533 | ||
512 | tcp_register_ulp(&tcp_tls_ulp_ops); | 534 | tcp_register_ulp(&tcp_tls_ulp_ops); |
513 | 535 | ||
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig index 1abcc4fc4df1..41722046b937 100644 --- a/net/wireless/Kconfig +++ b/net/wireless/Kconfig | |||
@@ -34,9 +34,10 @@ config CFG80211 | |||
34 | 34 | ||
35 | When built as a module it will be called cfg80211. | 35 | When built as a module it will be called cfg80211. |
36 | 36 | ||
37 | if CFG80211 | ||
38 | |||
37 | config NL80211_TESTMODE | 39 | config NL80211_TESTMODE |
38 | bool "nl80211 testmode command" | 40 | bool "nl80211 testmode command" |
39 | depends on CFG80211 | ||
40 | help | 41 | help |
41 | The nl80211 testmode command helps implementing things like | 42 | The nl80211 testmode command helps implementing things like |
42 | factory calibration or validation tools for wireless chips. | 43 | factory calibration or validation tools for wireless chips. |
@@ -51,7 +52,6 @@ config NL80211_TESTMODE | |||
51 | 52 | ||
52 | config CFG80211_DEVELOPER_WARNINGS | 53 | config CFG80211_DEVELOPER_WARNINGS |
53 | bool "enable developer warnings" | 54 | bool "enable developer warnings" |
54 | depends on CFG80211 | ||
55 | default n | 55 | default n |
56 | help | 56 | help |
57 | This option enables some additional warnings that help | 57 | This option enables some additional warnings that help |
@@ -68,7 +68,7 @@ config CFG80211_DEVELOPER_WARNINGS | |||
68 | 68 | ||
69 | config CFG80211_CERTIFICATION_ONUS | 69 | config CFG80211_CERTIFICATION_ONUS |
70 | bool "cfg80211 certification onus" | 70 | bool "cfg80211 certification onus" |
71 | depends on CFG80211 && EXPERT | 71 | depends on EXPERT |
72 | default n | 72 | default n |
73 | ---help--- | 73 | ---help--- |
74 | You should disable this option unless you are both capable | 74 | You should disable this option unless you are both capable |
@@ -159,7 +159,6 @@ config CFG80211_REG_RELAX_NO_IR | |||
159 | 159 | ||
160 | config CFG80211_DEFAULT_PS | 160 | config CFG80211_DEFAULT_PS |
161 | bool "enable powersave by default" | 161 | bool "enable powersave by default" |
162 | depends on CFG80211 | ||
163 | default y | 162 | default y |
164 | help | 163 | help |
165 | This option enables powersave mode by default. | 164 | This option enables powersave mode by default. |
@@ -170,7 +169,6 @@ config CFG80211_DEFAULT_PS | |||
170 | 169 | ||
171 | config CFG80211_DEBUGFS | 170 | config CFG80211_DEBUGFS |
172 | bool "cfg80211 DebugFS entries" | 171 | bool "cfg80211 DebugFS entries" |
173 | depends on CFG80211 | ||
174 | depends on DEBUG_FS | 172 | depends on DEBUG_FS |
175 | ---help--- | 173 | ---help--- |
176 | You can enable this if you want debugfs entries for cfg80211. | 174 | You can enable this if you want debugfs entries for cfg80211. |
@@ -180,7 +178,6 @@ config CFG80211_DEBUGFS | |||
180 | config CFG80211_CRDA_SUPPORT | 178 | config CFG80211_CRDA_SUPPORT |
181 | bool "support CRDA" if EXPERT | 179 | bool "support CRDA" if EXPERT |
182 | default y | 180 | default y |
183 | depends on CFG80211 | ||
184 | help | 181 | help |
185 | You should enable this option unless you know for sure you have no | 182 | You should enable this option unless you know for sure you have no |
186 | need for it, for example when using internal regdb (above) or the | 183 | need for it, for example when using internal regdb (above) or the |
@@ -190,7 +187,6 @@ config CFG80211_CRDA_SUPPORT | |||
190 | 187 | ||
191 | config CFG80211_WEXT | 188 | config CFG80211_WEXT |
192 | bool "cfg80211 wireless extensions compatibility" if !CFG80211_WEXT_EXPORT | 189 | bool "cfg80211 wireless extensions compatibility" if !CFG80211_WEXT_EXPORT |
193 | depends on CFG80211 | ||
194 | select WEXT_CORE | 190 | select WEXT_CORE |
195 | default y if CFG80211_WEXT_EXPORT | 191 | default y if CFG80211_WEXT_EXPORT |
196 | help | 192 | help |
@@ -199,11 +195,12 @@ config CFG80211_WEXT | |||
199 | 195 | ||
200 | config CFG80211_WEXT_EXPORT | 196 | config CFG80211_WEXT_EXPORT |
201 | bool | 197 | bool |
202 | depends on CFG80211 | ||
203 | help | 198 | help |
204 | Drivers should select this option if they require cfg80211's | 199 | Drivers should select this option if they require cfg80211's |
205 | wext compatibility symbols to be exported. | 200 | wext compatibility symbols to be exported. |
206 | 201 | ||
202 | endif # CFG80211 | ||
203 | |||
207 | config LIB80211 | 204 | config LIB80211 |
208 | tristate | 205 | tristate |
209 | default n | 206 | default n |
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index 8e70291e586a..e87d6c4dd5b6 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c | |||
@@ -217,7 +217,7 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x) | |||
217 | if (skb->len <= mtu) | 217 | if (skb->len <= mtu) |
218 | goto ok; | 218 | goto ok; |
219 | 219 | ||
220 | if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu)) | 220 | if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu)) |
221 | goto ok; | 221 | goto ok; |
222 | } | 222 | } |
223 | 223 | ||
diff --git a/samples/seccomp/Makefile b/samples/seccomp/Makefile index 0e349b80686e..ba942e3ead89 100644 --- a/samples/seccomp/Makefile +++ b/samples/seccomp/Makefile | |||
@@ -1,4 +1,5 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | 1 | # SPDX-License-Identifier: GPL-2.0 |
2 | ifndef CROSS_COMPILE | ||
2 | hostprogs-$(CONFIG_SAMPLE_SECCOMP) := bpf-fancy dropper bpf-direct | 3 | hostprogs-$(CONFIG_SAMPLE_SECCOMP) := bpf-fancy dropper bpf-direct |
3 | 4 | ||
4 | HOSTCFLAGS_bpf-fancy.o += -I$(objtree)/usr/include | 5 | HOSTCFLAGS_bpf-fancy.o += -I$(objtree)/usr/include |
@@ -16,7 +17,6 @@ HOSTCFLAGS_bpf-direct.o += -idirafter $(objtree)/include | |||
16 | bpf-direct-objs := bpf-direct.o | 17 | bpf-direct-objs := bpf-direct.o |
17 | 18 | ||
18 | # Try to match the kernel target. | 19 | # Try to match the kernel target. |
19 | ifndef CROSS_COMPILE | ||
20 | ifndef CONFIG_64BIT | 20 | ifndef CONFIG_64BIT |
21 | 21 | ||
22 | # s390 has -m31 flag to build 31 bit binaries | 22 | # s390 has -m31 flag to build 31 bit binaries |
@@ -35,12 +35,4 @@ HOSTLOADLIBES_bpf-fancy += $(MFLAG) | |||
35 | HOSTLOADLIBES_dropper += $(MFLAG) | 35 | HOSTLOADLIBES_dropper += $(MFLAG) |
36 | endif | 36 | endif |
37 | always := $(hostprogs-m) | 37 | always := $(hostprogs-m) |
38 | else | ||
39 | # MIPS system calls are defined based on the -mabi that is passed | ||
40 | # to the toolchain which may or may not be a valid option | ||
41 | # for the host toolchain. So disable tests if target architecture | ||
42 | # is MIPS but the host isn't. | ||
43 | ifndef CONFIG_MIPS | ||
44 | always := $(hostprogs-m) | ||
45 | endif | ||
46 | endif | 38 | endif |
diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 47cddf32aeba..4f2b25d43ec9 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build | |||
@@ -256,6 +256,8 @@ __objtool_obj := $(objtree)/tools/objtool/objtool | |||
256 | 256 | ||
257 | objtool_args = $(if $(CONFIG_UNWINDER_ORC),orc generate,check) | 257 | objtool_args = $(if $(CONFIG_UNWINDER_ORC),orc generate,check) |
258 | 258 | ||
259 | objtool_args += $(if $(part-of-module), --module,) | ||
260 | |||
259 | ifndef CONFIG_FRAME_POINTER | 261 | ifndef CONFIG_FRAME_POINTER |
260 | objtool_args += --no-fp | 262 | objtool_args += --no-fp |
261 | endif | 263 | endif |
@@ -264,6 +266,12 @@ objtool_args += --no-unreachable | |||
264 | else | 266 | else |
265 | objtool_args += $(call cc-ifversion, -lt, 0405, --no-unreachable) | 267 | objtool_args += $(call cc-ifversion, -lt, 0405, --no-unreachable) |
266 | endif | 268 | endif |
269 | ifdef CONFIG_RETPOLINE | ||
270 | ifneq ($(RETPOLINE_CFLAGS),) | ||
271 | objtool_args += --retpoline | ||
272 | endif | ||
273 | endif | ||
274 | |||
267 | 275 | ||
268 | ifdef CONFIG_MODVERSIONS | 276 | ifdef CONFIG_MODVERSIONS |
269 | objtool_o = $(@D)/.tmp_$(@F) | 277 | objtool_o = $(@D)/.tmp_$(@F) |
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index 5589bae34af6..a6f538b31ad6 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib | |||
@@ -297,11 +297,11 @@ cmd_dt_S_dtb= \ | |||
297 | echo '\#include <asm-generic/vmlinux.lds.h>'; \ | 297 | echo '\#include <asm-generic/vmlinux.lds.h>'; \ |
298 | echo '.section .dtb.init.rodata,"a"'; \ | 298 | echo '.section .dtb.init.rodata,"a"'; \ |
299 | echo '.balign STRUCT_ALIGNMENT'; \ | 299 | echo '.balign STRUCT_ALIGNMENT'; \ |
300 | echo '.global __dtb_$(*F)_begin'; \ | 300 | echo '.global __dtb_$(subst -,_,$(*F))_begin'; \ |
301 | echo '__dtb_$(*F)_begin:'; \ | 301 | echo '__dtb_$(subst -,_,$(*F))_begin:'; \ |
302 | echo '.incbin "$<" '; \ | 302 | echo '.incbin "$<" '; \ |
303 | echo '__dtb_$(*F)_end:'; \ | 303 | echo '__dtb_$(subst -,_,$(*F))_end:'; \ |
304 | echo '.global __dtb_$(*F)_end'; \ | 304 | echo '.global __dtb_$(subst -,_,$(*F))_end'; \ |
305 | echo '.balign STRUCT_ALIGNMENT'; \ | 305 | echo '.balign STRUCT_ALIGNMENT'; \ |
306 | ) > $@ | 306 | ) > $@ |
307 | 307 | ||
diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c index fa3d39b6f23b..449b68c4c90c 100644 --- a/scripts/basic/fixdep.c +++ b/scripts/basic/fixdep.c | |||
@@ -93,14 +93,6 @@ | |||
93 | * (Note: it'd be easy to port over the complete mkdep state machine, | 93 | * (Note: it'd be easy to port over the complete mkdep state machine, |
94 | * but I don't think the added complexity is worth it) | 94 | * but I don't think the added complexity is worth it) |
95 | */ | 95 | */ |
96 | /* | ||
97 | * Note 2: if somebody writes HELLO_CONFIG_BOOM in a file, it will depend onto | ||
98 | * CONFIG_BOOM. This could seem a bug (not too hard to fix), but please do not | ||
99 | * fix it! Some UserModeLinux files (look at arch/um/) call CONFIG_BOOM as | ||
100 | * UML_CONFIG_BOOM, to avoid conflicts with /usr/include/linux/autoconf.h, | ||
101 | * through arch/um/include/uml-config.h; this fixdep "bug" makes sure that | ||
102 | * those files will have correct dependencies. | ||
103 | */ | ||
104 | 96 | ||
105 | #include <sys/types.h> | 97 | #include <sys/types.h> |
106 | #include <sys/stat.h> | 98 | #include <sys/stat.h> |
@@ -233,8 +225,13 @@ static int str_ends_with(const char *s, int slen, const char *sub) | |||
233 | static void parse_config_file(const char *p) | 225 | static void parse_config_file(const char *p) |
234 | { | 226 | { |
235 | const char *q, *r; | 227 | const char *q, *r; |
228 | const char *start = p; | ||
236 | 229 | ||
237 | while ((p = strstr(p, "CONFIG_"))) { | 230 | while ((p = strstr(p, "CONFIG_"))) { |
231 | if (p > start && (isalnum(p[-1]) || p[-1] == '_')) { | ||
232 | p += 7; | ||
233 | continue; | ||
234 | } | ||
238 | p += 7; | 235 | p += 7; |
239 | q = p; | 236 | q = p; |
240 | while (*q && (isalnum(*q) || *q == '_')) | 237 | while (*q && (isalnum(*q) || *q == '_')) |
@@ -286,8 +283,6 @@ static int is_ignored_file(const char *s, int len) | |||
286 | { | 283 | { |
287 | return str_ends_with(s, len, "include/generated/autoconf.h") || | 284 | return str_ends_with(s, len, "include/generated/autoconf.h") || |
288 | str_ends_with(s, len, "include/generated/autoksyms.h") || | 285 | str_ends_with(s, len, "include/generated/autoksyms.h") || |
289 | str_ends_with(s, len, "arch/um/include/uml-config.h") || | ||
290 | str_ends_with(s, len, "include/linux/kconfig.h") || | ||
291 | str_ends_with(s, len, ".ver"); | 286 | str_ends_with(s, len, ".ver"); |
292 | } | 287 | } |
293 | 288 | ||
diff --git a/scripts/bloat-o-meter b/scripts/bloat-o-meter index 94b664817ad9..d84a5674e95e 100755 --- a/scripts/bloat-o-meter +++ b/scripts/bloat-o-meter | |||
@@ -15,7 +15,7 @@ signal(SIGPIPE, SIG_DFL) | |||
15 | if len(sys.argv) < 3: | 15 | if len(sys.argv) < 3: |
16 | sys.stderr.write("usage: %s [option] file1 file2\n" % sys.argv[0]) | 16 | sys.stderr.write("usage: %s [option] file1 file2\n" % sys.argv[0]) |
17 | sys.stderr.write("The options are:\n") | 17 | sys.stderr.write("The options are:\n") |
18 | sys.stderr.write("-c cateogrize output based on symbole type\n") | 18 | sys.stderr.write("-c categorize output based on symbol type\n") |
19 | sys.stderr.write("-d Show delta of Data Section\n") | 19 | sys.stderr.write("-d Show delta of Data Section\n") |
20 | sys.stderr.write("-t Show delta of text Section\n") | 20 | sys.stderr.write("-t Show delta of text Section\n") |
21 | sys.exit(-1) | 21 | sys.exit(-1) |
diff --git a/scripts/coccinelle/api/memdup.cocci b/scripts/coccinelle/api/memdup.cocci index 1249b727644b..8fd6437beda8 100644 --- a/scripts/coccinelle/api/memdup.cocci +++ b/scripts/coccinelle/api/memdup.cocci | |||
@@ -56,10 +56,10 @@ statement S; | |||
56 | p << r.p; | 56 | p << r.p; |
57 | @@ | 57 | @@ |
58 | 58 | ||
59 | coccilib.org.print_todo(p[0], "WARNING opportunity for kmemdep") | 59 | coccilib.org.print_todo(p[0], "WARNING opportunity for kmemdup") |
60 | 60 | ||
61 | @script:python depends on report@ | 61 | @script:python depends on report@ |
62 | p << r.p; | 62 | p << r.p; |
63 | @@ | 63 | @@ |
64 | 64 | ||
65 | coccilib.report.print_report(p[0], "WARNING opportunity for kmemdep") | 65 | coccilib.report.print_report(p[0], "WARNING opportunity for kmemdup") |
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c index 9ee9bf7fd1a2..65792650c630 100644 --- a/scripts/kallsyms.c +++ b/scripts/kallsyms.c | |||
@@ -595,7 +595,7 @@ static void optimize_result(void) | |||
595 | * original char code */ | 595 | * original char code */ |
596 | if (!best_table_len[i]) { | 596 | if (!best_table_len[i]) { |
597 | 597 | ||
598 | /* find the token with the breates profit value */ | 598 | /* find the token with the best profit value */ |
599 | best = find_best_token(); | 599 | best = find_best_token(); |
600 | if (token_profit[best] == 0) | 600 | if (token_profit[best] == 0) |
601 | break; | 601 | break; |
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c index 5c12dc91ef34..df26c7b0fe13 100644 --- a/scripts/kconfig/confdata.c +++ b/scripts/kconfig/confdata.c | |||
@@ -178,7 +178,7 @@ static int conf_set_sym_val(struct symbol *sym, int def, int def_flags, char *p) | |||
178 | case S_HEX: | 178 | case S_HEX: |
179 | done: | 179 | done: |
180 | if (sym_string_valid(sym, p)) { | 180 | if (sym_string_valid(sym, p)) { |
181 | sym->def[def].val = strdup(p); | 181 | sym->def[def].val = xstrdup(p); |
182 | sym->flags |= def_flags; | 182 | sym->flags |= def_flags; |
183 | } else { | 183 | } else { |
184 | if (def != S_DEF_AUTO) | 184 | if (def != S_DEF_AUTO) |
diff --git a/scripts/kconfig/kxgettext.c b/scripts/kconfig/kxgettext.c index 2858738b22d5..240880a89111 100644 --- a/scripts/kconfig/kxgettext.c +++ b/scripts/kconfig/kxgettext.c | |||
@@ -101,7 +101,7 @@ static struct message *message__new(const char *msg, char *option, | |||
101 | if (self->files == NULL) | 101 | if (self->files == NULL) |
102 | goto out_fail; | 102 | goto out_fail; |
103 | 103 | ||
104 | self->msg = strdup(msg); | 104 | self->msg = xstrdup(msg); |
105 | if (self->msg == NULL) | 105 | if (self->msg == NULL) |
106 | goto out_fail_msg; | 106 | goto out_fail_msg; |
107 | 107 | ||
diff --git a/scripts/kconfig/lkc.h b/scripts/kconfig/lkc.h index 4e23febbe4b2..2d5ec2d0e952 100644 --- a/scripts/kconfig/lkc.h +++ b/scripts/kconfig/lkc.h | |||
@@ -115,6 +115,7 @@ int file_write_dep(const char *name); | |||
115 | void *xmalloc(size_t size); | 115 | void *xmalloc(size_t size); |
116 | void *xcalloc(size_t nmemb, size_t size); | 116 | void *xcalloc(size_t nmemb, size_t size); |
117 | void *xrealloc(void *p, size_t size); | 117 | void *xrealloc(void *p, size_t size); |
118 | char *xstrdup(const char *s); | ||
118 | 119 | ||
119 | struct gstr { | 120 | struct gstr { |
120 | size_t len; | 121 | size_t len; |
diff --git a/scripts/kconfig/lxdialog/check-lxdialog.sh b/scripts/kconfig/lxdialog/check-lxdialog.sh index a10bd9d6fafd..6c0bcd9c472d 100755 --- a/scripts/kconfig/lxdialog/check-lxdialog.sh +++ b/scripts/kconfig/lxdialog/check-lxdialog.sh | |||
@@ -55,7 +55,8 @@ EOF | |||
55 | echo " *** required header files." 1>&2 | 55 | echo " *** required header files." 1>&2 |
56 | echo " *** 'make menuconfig' requires the ncurses libraries." 1>&2 | 56 | echo " *** 'make menuconfig' requires the ncurses libraries." 1>&2 |
57 | echo " *** " 1>&2 | 57 | echo " *** " 1>&2 |
58 | echo " *** Install ncurses (ncurses-devel) and try again." 1>&2 | 58 | echo " *** Install ncurses (ncurses-devel or libncurses-dev " 1>&2 |
59 | echo " *** depending on your distribution) and try again." 1>&2 | ||
59 | echo " *** " 1>&2 | 60 | echo " *** " 1>&2 |
60 | exit 1 | 61 | exit 1 |
61 | fi | 62 | fi |
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c index 99222855544c..36cd3e1f1c28 100644 --- a/scripts/kconfig/menu.c +++ b/scripts/kconfig/menu.c | |||
@@ -212,6 +212,7 @@ void menu_add_option(int token, char *arg) | |||
212 | sym_defconfig_list = current_entry->sym; | 212 | sym_defconfig_list = current_entry->sym; |
213 | else if (sym_defconfig_list != current_entry->sym) | 213 | else if (sym_defconfig_list != current_entry->sym) |
214 | zconf_error("trying to redefine defconfig symbol"); | 214 | zconf_error("trying to redefine defconfig symbol"); |
215 | sym_defconfig_list->flags |= SYMBOL_AUTO; | ||
215 | break; | 216 | break; |
216 | case T_OPT_ENV: | 217 | case T_OPT_ENV: |
217 | prop_add_env(arg); | 218 | prop_add_env(arg); |
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c index cca9663be5dd..2220bc4b051b 100644 --- a/scripts/kconfig/symbol.c +++ b/scripts/kconfig/symbol.c | |||
@@ -183,7 +183,7 @@ static void sym_validate_range(struct symbol *sym) | |||
183 | sprintf(str, "%lld", val2); | 183 | sprintf(str, "%lld", val2); |
184 | else | 184 | else |
185 | sprintf(str, "0x%llx", val2); | 185 | sprintf(str, "0x%llx", val2); |
186 | sym->curr.val = strdup(str); | 186 | sym->curr.val = xstrdup(str); |
187 | } | 187 | } |
188 | 188 | ||
189 | static void sym_set_changed(struct symbol *sym) | 189 | static void sym_set_changed(struct symbol *sym) |
@@ -849,7 +849,7 @@ struct symbol *sym_lookup(const char *name, int flags) | |||
849 | : !(symbol->flags & (SYMBOL_CONST|SYMBOL_CHOICE)))) | 849 | : !(symbol->flags & (SYMBOL_CONST|SYMBOL_CHOICE)))) |
850 | return symbol; | 850 | return symbol; |
851 | } | 851 | } |
852 | new_name = strdup(name); | 852 | new_name = xstrdup(name); |
853 | } else { | 853 | } else { |
854 | new_name = NULL; | 854 | new_name = NULL; |
855 | hash = 0; | 855 | hash = 0; |
diff --git a/scripts/kconfig/util.c b/scripts/kconfig/util.c index b98a79e30e04..c6f6e21b809f 100644 --- a/scripts/kconfig/util.c +++ b/scripts/kconfig/util.c | |||
@@ -154,3 +154,14 @@ void *xrealloc(void *p, size_t size) | |||
154 | fprintf(stderr, "Out of memory.\n"); | 154 | fprintf(stderr, "Out of memory.\n"); |
155 | exit(1); | 155 | exit(1); |
156 | } | 156 | } |
157 | |||
158 | char *xstrdup(const char *s) | ||
159 | { | ||
160 | char *p; | ||
161 | |||
162 | p = strdup(s); | ||
163 | if (p) | ||
164 | return p; | ||
165 | fprintf(stderr, "Out of memory.\n"); | ||
166 | exit(1); | ||
167 | } | ||
diff --git a/scripts/kconfig/zconf.l b/scripts/kconfig/zconf.l index 02de6fe302a9..88b650eb9cc9 100644 --- a/scripts/kconfig/zconf.l +++ b/scripts/kconfig/zconf.l | |||
@@ -332,16 +332,12 @@ void zconf_nextfile(const char *name) | |||
332 | "Inclusion path:\n current file : '%s'\n", | 332 | "Inclusion path:\n current file : '%s'\n", |
333 | zconf_curname(), zconf_lineno(), | 333 | zconf_curname(), zconf_lineno(), |
334 | zconf_curname()); | 334 | zconf_curname()); |
335 | iter = current_file->parent; | 335 | iter = current_file; |
336 | while (iter && \ | 336 | do { |
337 | strcmp(iter->name,current_file->name)) { | ||
338 | fprintf(stderr, " included from: '%s:%d'\n", | ||
339 | iter->name, iter->lineno-1); | ||
340 | iter = iter->parent; | 337 | iter = iter->parent; |
341 | } | ||
342 | if (iter) | ||
343 | fprintf(stderr, " included from: '%s:%d'\n", | 338 | fprintf(stderr, " included from: '%s:%d'\n", |
344 | iter->name, iter->lineno+1); | 339 | iter->name, iter->lineno - 1); |
340 | } while (strcmp(iter->name, current_file->name)); | ||
345 | exit(1); | 341 | exit(1); |
346 | } | 342 | } |
347 | } | 343 | } |
diff --git a/scripts/kconfig/zconf.y b/scripts/kconfig/zconf.y index 4be98050b961..ad6305b0f40c 100644 --- a/scripts/kconfig/zconf.y +++ b/scripts/kconfig/zconf.y | |||
@@ -127,7 +127,7 @@ no_mainmenu_stmt: /* empty */ | |||
127 | * later regardless of whether it comes from the 'prompt' in | 127 | * later regardless of whether it comes from the 'prompt' in |
128 | * mainmenu_stmt or here | 128 | * mainmenu_stmt or here |
129 | */ | 129 | */ |
130 | menu_add_prompt(P_MENU, strdup("Linux Kernel Configuration"), NULL); | 130 | menu_add_prompt(P_MENU, xstrdup("Linux Kernel Configuration"), NULL); |
131 | }; | 131 | }; |
132 | 132 | ||
133 | 133 | ||
@@ -276,6 +276,7 @@ choice: T_CHOICE word_opt T_EOL | |||
276 | sym->flags |= SYMBOL_AUTO; | 276 | sym->flags |= SYMBOL_AUTO; |
277 | menu_add_entry(sym); | 277 | menu_add_entry(sym); |
278 | menu_add_expr(P_CHOICE, NULL, NULL); | 278 | menu_add_expr(P_CHOICE, NULL, NULL); |
279 | free($2); | ||
279 | printd(DEBUG_PARSE, "%s:%d:choice\n", zconf_curname(), zconf_lineno()); | 280 | printd(DEBUG_PARSE, "%s:%d:choice\n", zconf_curname(), zconf_lineno()); |
280 | }; | 281 | }; |
281 | 282 | ||
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh index c0d129d7f430..be56a1153014 100755 --- a/scripts/link-vmlinux.sh +++ b/scripts/link-vmlinux.sh | |||
@@ -246,7 +246,7 @@ else | |||
246 | fi; | 246 | fi; |
247 | 247 | ||
248 | # final build of init/ | 248 | # final build of init/ |
249 | ${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init GCC_PLUGINS_CFLAGS="${GCC_PLUGINS_CFLAGS}" | 249 | ${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init |
250 | 250 | ||
251 | archive_builtin | 251 | archive_builtin |
252 | 252 | ||
diff --git a/sound/core/control.c b/sound/core/control.c index 0b3026d937b1..8a77620a3854 100644 --- a/sound/core/control.c +++ b/sound/core/control.c | |||
@@ -889,7 +889,7 @@ static int snd_ctl_elem_read(struct snd_card *card, | |||
889 | 889 | ||
890 | index_offset = snd_ctl_get_ioff(kctl, &control->id); | 890 | index_offset = snd_ctl_get_ioff(kctl, &control->id); |
891 | vd = &kctl->vd[index_offset]; | 891 | vd = &kctl->vd[index_offset]; |
892 | if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_READ) && kctl->get == NULL) | 892 | if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_READ) || kctl->get == NULL) |
893 | return -EPERM; | 893 | return -EPERM; |
894 | 894 | ||
895 | snd_ctl_build_ioff(&control->id, kctl, index_offset); | 895 | snd_ctl_build_ioff(&control->id, kctl, index_offset); |
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c index b044c0a5a674..02298c9c6020 100644 --- a/sound/core/oss/pcm_oss.c +++ b/sound/core/oss/pcm_oss.c | |||
@@ -1762,10 +1762,9 @@ static int snd_pcm_oss_get_formats(struct snd_pcm_oss_file *pcm_oss_file) | |||
1762 | return -ENOMEM; | 1762 | return -ENOMEM; |
1763 | _snd_pcm_hw_params_any(params); | 1763 | _snd_pcm_hw_params_any(params); |
1764 | err = snd_pcm_hw_refine(substream, params); | 1764 | err = snd_pcm_hw_refine(substream, params); |
1765 | format_mask = hw_param_mask_c(params, SNDRV_PCM_HW_PARAM_FORMAT); | ||
1766 | kfree(params); | ||
1767 | if (err < 0) | 1765 | if (err < 0) |
1768 | return err; | 1766 | goto error; |
1767 | format_mask = hw_param_mask_c(params, SNDRV_PCM_HW_PARAM_FORMAT); | ||
1769 | for (fmt = 0; fmt < 32; ++fmt) { | 1768 | for (fmt = 0; fmt < 32; ++fmt) { |
1770 | if (snd_mask_test(format_mask, fmt)) { | 1769 | if (snd_mask_test(format_mask, fmt)) { |
1771 | int f = snd_pcm_oss_format_to(fmt); | 1770 | int f = snd_pcm_oss_format_to(fmt); |
@@ -1773,7 +1772,10 @@ static int snd_pcm_oss_get_formats(struct snd_pcm_oss_file *pcm_oss_file) | |||
1773 | formats |= f; | 1772 | formats |= f; |
1774 | } | 1773 | } |
1775 | } | 1774 | } |
1776 | return formats; | 1775 | |
1776 | error: | ||
1777 | kfree(params); | ||
1778 | return err < 0 ? err : formats; | ||
1777 | } | 1779 | } |
1778 | 1780 | ||
1779 | static int snd_pcm_oss_set_format(struct snd_pcm_oss_file *pcm_oss_file, int format) | 1781 | static int snd_pcm_oss_set_format(struct snd_pcm_oss_file *pcm_oss_file, int format) |
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c index 04d4db44fae5..61a07fe34cd2 100644 --- a/sound/core/seq/seq_clientmgr.c +++ b/sound/core/seq/seq_clientmgr.c | |||
@@ -255,12 +255,12 @@ static int seq_free_client1(struct snd_seq_client *client) | |||
255 | 255 | ||
256 | if (!client) | 256 | if (!client) |
257 | return 0; | 257 | return 0; |
258 | snd_seq_delete_all_ports(client); | ||
259 | snd_seq_queue_client_leave(client->number); | ||
260 | spin_lock_irqsave(&clients_lock, flags); | 258 | spin_lock_irqsave(&clients_lock, flags); |
261 | clienttablock[client->number] = 1; | 259 | clienttablock[client->number] = 1; |
262 | clienttab[client->number] = NULL; | 260 | clienttab[client->number] = NULL; |
263 | spin_unlock_irqrestore(&clients_lock, flags); | 261 | spin_unlock_irqrestore(&clients_lock, flags); |
262 | snd_seq_delete_all_ports(client); | ||
263 | snd_seq_queue_client_leave(client->number); | ||
264 | snd_use_lock_sync(&client->use_lock); | 264 | snd_use_lock_sync(&client->use_lock); |
265 | snd_seq_queue_client_termination(client->number); | 265 | snd_seq_queue_client_termination(client->number); |
266 | if (client->pool) | 266 | if (client->pool) |
@@ -910,7 +910,8 @@ int snd_seq_dispatch_event(struct snd_seq_event_cell *cell, int atomic, int hop) | |||
910 | static int snd_seq_client_enqueue_event(struct snd_seq_client *client, | 910 | static int snd_seq_client_enqueue_event(struct snd_seq_client *client, |
911 | struct snd_seq_event *event, | 911 | struct snd_seq_event *event, |
912 | struct file *file, int blocking, | 912 | struct file *file, int blocking, |
913 | int atomic, int hop) | 913 | int atomic, int hop, |
914 | struct mutex *mutexp) | ||
914 | { | 915 | { |
915 | struct snd_seq_event_cell *cell; | 916 | struct snd_seq_event_cell *cell; |
916 | int err; | 917 | int err; |
@@ -948,7 +949,8 @@ static int snd_seq_client_enqueue_event(struct snd_seq_client *client, | |||
948 | return -ENXIO; /* queue is not allocated */ | 949 | return -ENXIO; /* queue is not allocated */ |
949 | 950 | ||
950 | /* allocate an event cell */ | 951 | /* allocate an event cell */ |
951 | err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic, file); | 952 | err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic, |
953 | file, mutexp); | ||
952 | if (err < 0) | 954 | if (err < 0) |
953 | return err; | 955 | return err; |
954 | 956 | ||
@@ -1017,12 +1019,11 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf, | |||
1017 | return -ENXIO; | 1019 | return -ENXIO; |
1018 | 1020 | ||
1019 | /* allocate the pool now if the pool is not allocated yet */ | 1021 | /* allocate the pool now if the pool is not allocated yet */ |
1022 | mutex_lock(&client->ioctl_mutex); | ||
1020 | if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) { | 1023 | if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) { |
1021 | mutex_lock(&client->ioctl_mutex); | ||
1022 | err = snd_seq_pool_init(client->pool); | 1024 | err = snd_seq_pool_init(client->pool); |
1023 | mutex_unlock(&client->ioctl_mutex); | ||
1024 | if (err < 0) | 1025 | if (err < 0) |
1025 | return -ENOMEM; | 1026 | goto out; |
1026 | } | 1027 | } |
1027 | 1028 | ||
1028 | /* only process whole events */ | 1029 | /* only process whole events */ |
@@ -1073,7 +1074,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf, | |||
1073 | /* ok, enqueue it */ | 1074 | /* ok, enqueue it */ |
1074 | err = snd_seq_client_enqueue_event(client, &event, file, | 1075 | err = snd_seq_client_enqueue_event(client, &event, file, |
1075 | !(file->f_flags & O_NONBLOCK), | 1076 | !(file->f_flags & O_NONBLOCK), |
1076 | 0, 0); | 1077 | 0, 0, &client->ioctl_mutex); |
1077 | if (err < 0) | 1078 | if (err < 0) |
1078 | break; | 1079 | break; |
1079 | 1080 | ||
@@ -1084,6 +1085,8 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf, | |||
1084 | written += len; | 1085 | written += len; |
1085 | } | 1086 | } |
1086 | 1087 | ||
1088 | out: | ||
1089 | mutex_unlock(&client->ioctl_mutex); | ||
1087 | return written ? written : err; | 1090 | return written ? written : err; |
1088 | } | 1091 | } |
1089 | 1092 | ||
@@ -1838,9 +1841,11 @@ static int snd_seq_ioctl_set_client_pool(struct snd_seq_client *client, | |||
1838 | (! snd_seq_write_pool_allocated(client) || | 1841 | (! snd_seq_write_pool_allocated(client) || |
1839 | info->output_pool != client->pool->size)) { | 1842 | info->output_pool != client->pool->size)) { |
1840 | if (snd_seq_write_pool_allocated(client)) { | 1843 | if (snd_seq_write_pool_allocated(client)) { |
1844 | /* is the pool in use? */ | ||
1845 | if (atomic_read(&client->pool->counter)) | ||
1846 | return -EBUSY; | ||
1841 | /* remove all existing cells */ | 1847 | /* remove all existing cells */ |
1842 | snd_seq_pool_mark_closing(client->pool); | 1848 | snd_seq_pool_mark_closing(client->pool); |
1843 | snd_seq_queue_client_leave_cells(client->number); | ||
1844 | snd_seq_pool_done(client->pool); | 1849 | snd_seq_pool_done(client->pool); |
1845 | } | 1850 | } |
1846 | client->pool->size = info->output_pool; | 1851 | client->pool->size = info->output_pool; |
@@ -2260,7 +2265,8 @@ static int kernel_client_enqueue(int client, struct snd_seq_event *ev, | |||
2260 | if (! cptr->accept_output) | 2265 | if (! cptr->accept_output) |
2261 | result = -EPERM; | 2266 | result = -EPERM; |
2262 | else /* send it */ | 2267 | else /* send it */ |
2263 | result = snd_seq_client_enqueue_event(cptr, ev, file, blocking, atomic, hop); | 2268 | result = snd_seq_client_enqueue_event(cptr, ev, file, blocking, |
2269 | atomic, hop, NULL); | ||
2264 | 2270 | ||
2265 | snd_seq_client_unlock(cptr); | 2271 | snd_seq_client_unlock(cptr); |
2266 | return result; | 2272 | return result; |
diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c index a8c2822e0198..72c0302a55d2 100644 --- a/sound/core/seq/seq_fifo.c +++ b/sound/core/seq/seq_fifo.c | |||
@@ -125,7 +125,7 @@ int snd_seq_fifo_event_in(struct snd_seq_fifo *f, | |||
125 | return -EINVAL; | 125 | return -EINVAL; |
126 | 126 | ||
127 | snd_use_lock_use(&f->use_lock); | 127 | snd_use_lock_use(&f->use_lock); |
128 | err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL); /* always non-blocking */ | 128 | err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL, NULL); /* always non-blocking */ |
129 | if (err < 0) { | 129 | if (err < 0) { |
130 | if ((err == -ENOMEM) || (err == -EAGAIN)) | 130 | if ((err == -ENOMEM) || (err == -EAGAIN)) |
131 | atomic_inc(&f->overflow); | 131 | atomic_inc(&f->overflow); |
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c index f763682584a8..ab1112e90f88 100644 --- a/sound/core/seq/seq_memory.c +++ b/sound/core/seq/seq_memory.c | |||
@@ -220,7 +220,8 @@ void snd_seq_cell_free(struct snd_seq_event_cell * cell) | |||
220 | */ | 220 | */ |
221 | static int snd_seq_cell_alloc(struct snd_seq_pool *pool, | 221 | static int snd_seq_cell_alloc(struct snd_seq_pool *pool, |
222 | struct snd_seq_event_cell **cellp, | 222 | struct snd_seq_event_cell **cellp, |
223 | int nonblock, struct file *file) | 223 | int nonblock, struct file *file, |
224 | struct mutex *mutexp) | ||
224 | { | 225 | { |
225 | struct snd_seq_event_cell *cell; | 226 | struct snd_seq_event_cell *cell; |
226 | unsigned long flags; | 227 | unsigned long flags; |
@@ -244,7 +245,11 @@ static int snd_seq_cell_alloc(struct snd_seq_pool *pool, | |||
244 | set_current_state(TASK_INTERRUPTIBLE); | 245 | set_current_state(TASK_INTERRUPTIBLE); |
245 | add_wait_queue(&pool->output_sleep, &wait); | 246 | add_wait_queue(&pool->output_sleep, &wait); |
246 | spin_unlock_irq(&pool->lock); | 247 | spin_unlock_irq(&pool->lock); |
248 | if (mutexp) | ||
249 | mutex_unlock(mutexp); | ||
247 | schedule(); | 250 | schedule(); |
251 | if (mutexp) | ||
252 | mutex_lock(mutexp); | ||
248 | spin_lock_irq(&pool->lock); | 253 | spin_lock_irq(&pool->lock); |
249 | remove_wait_queue(&pool->output_sleep, &wait); | 254 | remove_wait_queue(&pool->output_sleep, &wait); |
250 | /* interrupted? */ | 255 | /* interrupted? */ |
@@ -287,7 +292,7 @@ __error: | |||
287 | */ | 292 | */ |
288 | int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event, | 293 | int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event, |
289 | struct snd_seq_event_cell **cellp, int nonblock, | 294 | struct snd_seq_event_cell **cellp, int nonblock, |
290 | struct file *file) | 295 | struct file *file, struct mutex *mutexp) |
291 | { | 296 | { |
292 | int ncells, err; | 297 | int ncells, err; |
293 | unsigned int extlen; | 298 | unsigned int extlen; |
@@ -304,7 +309,7 @@ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event, | |||
304 | if (ncells >= pool->total_elements) | 309 | if (ncells >= pool->total_elements) |
305 | return -ENOMEM; | 310 | return -ENOMEM; |
306 | 311 | ||
307 | err = snd_seq_cell_alloc(pool, &cell, nonblock, file); | 312 | err = snd_seq_cell_alloc(pool, &cell, nonblock, file, mutexp); |
308 | if (err < 0) | 313 | if (err < 0) |
309 | return err; | 314 | return err; |
310 | 315 | ||
@@ -330,7 +335,8 @@ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event, | |||
330 | int size = sizeof(struct snd_seq_event); | 335 | int size = sizeof(struct snd_seq_event); |
331 | if (len < size) | 336 | if (len < size) |
332 | size = len; | 337 | size = len; |
333 | err = snd_seq_cell_alloc(pool, &tmp, nonblock, file); | 338 | err = snd_seq_cell_alloc(pool, &tmp, nonblock, file, |
339 | mutexp); | ||
334 | if (err < 0) | 340 | if (err < 0) |
335 | goto __error; | 341 | goto __error; |
336 | if (cell->event.data.ext.ptr == NULL) | 342 | if (cell->event.data.ext.ptr == NULL) |
diff --git a/sound/core/seq/seq_memory.h b/sound/core/seq/seq_memory.h index 32f959c17786..3abe306c394a 100644 --- a/sound/core/seq/seq_memory.h +++ b/sound/core/seq/seq_memory.h | |||
@@ -66,7 +66,8 @@ struct snd_seq_pool { | |||
66 | void snd_seq_cell_free(struct snd_seq_event_cell *cell); | 66 | void snd_seq_cell_free(struct snd_seq_event_cell *cell); |
67 | 67 | ||
68 | int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event, | 68 | int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event, |
69 | struct snd_seq_event_cell **cellp, int nonblock, struct file *file); | 69 | struct snd_seq_event_cell **cellp, int nonblock, |
70 | struct file *file, struct mutex *mutexp); | ||
70 | 71 | ||
71 | /* return number of unused (free) cells */ | 72 | /* return number of unused (free) cells */ |
72 | static inline int snd_seq_unused_cells(struct snd_seq_pool *pool) | 73 | static inline int snd_seq_unused_cells(struct snd_seq_pool *pool) |
diff --git a/sound/core/seq/seq_prioq.c b/sound/core/seq/seq_prioq.c index bc1c8488fc2a..2bc6759e4adc 100644 --- a/sound/core/seq/seq_prioq.c +++ b/sound/core/seq/seq_prioq.c | |||
@@ -87,7 +87,7 @@ void snd_seq_prioq_delete(struct snd_seq_prioq **fifo) | |||
87 | if (f->cells > 0) { | 87 | if (f->cells > 0) { |
88 | /* drain prioQ */ | 88 | /* drain prioQ */ |
89 | while (f->cells > 0) | 89 | while (f->cells > 0) |
90 | snd_seq_cell_free(snd_seq_prioq_cell_out(f)); | 90 | snd_seq_cell_free(snd_seq_prioq_cell_out(f, NULL)); |
91 | } | 91 | } |
92 | 92 | ||
93 | kfree(f); | 93 | kfree(f); |
@@ -214,8 +214,18 @@ int snd_seq_prioq_cell_in(struct snd_seq_prioq * f, | |||
214 | return 0; | 214 | return 0; |
215 | } | 215 | } |
216 | 216 | ||
217 | /* return 1 if the current time >= event timestamp */ | ||
218 | static int event_is_ready(struct snd_seq_event *ev, void *current_time) | ||
219 | { | ||
220 | if ((ev->flags & SNDRV_SEQ_TIME_STAMP_MASK) == SNDRV_SEQ_TIME_STAMP_TICK) | ||
221 | return snd_seq_compare_tick_time(current_time, &ev->time.tick); | ||
222 | else | ||
223 | return snd_seq_compare_real_time(current_time, &ev->time.time); | ||
224 | } | ||
225 | |||
217 | /* dequeue cell from prioq */ | 226 | /* dequeue cell from prioq */ |
218 | struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f) | 227 | struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f, |
228 | void *current_time) | ||
219 | { | 229 | { |
220 | struct snd_seq_event_cell *cell; | 230 | struct snd_seq_event_cell *cell; |
221 | unsigned long flags; | 231 | unsigned long flags; |
@@ -227,6 +237,8 @@ struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f) | |||
227 | spin_lock_irqsave(&f->lock, flags); | 237 | spin_lock_irqsave(&f->lock, flags); |
228 | 238 | ||
229 | cell = f->head; | 239 | cell = f->head; |
240 | if (cell && current_time && !event_is_ready(&cell->event, current_time)) | ||
241 | cell = NULL; | ||
230 | if (cell) { | 242 | if (cell) { |
231 | f->head = cell->next; | 243 | f->head = cell->next; |
232 | 244 | ||
@@ -252,18 +264,6 @@ int snd_seq_prioq_avail(struct snd_seq_prioq * f) | |||
252 | return f->cells; | 264 | return f->cells; |
253 | } | 265 | } |
254 | 266 | ||
255 | |||
256 | /* peek at cell at the head of the prioq */ | ||
257 | struct snd_seq_event_cell *snd_seq_prioq_cell_peek(struct snd_seq_prioq * f) | ||
258 | { | ||
259 | if (f == NULL) { | ||
260 | pr_debug("ALSA: seq: snd_seq_prioq_cell_in() called with NULL prioq\n"); | ||
261 | return NULL; | ||
262 | } | ||
263 | return f->head; | ||
264 | } | ||
265 | |||
266 | |||
267 | static inline int prioq_match(struct snd_seq_event_cell *cell, | 267 | static inline int prioq_match(struct snd_seq_event_cell *cell, |
268 | int client, int timestamp) | 268 | int client, int timestamp) |
269 | { | 269 | { |
diff --git a/sound/core/seq/seq_prioq.h b/sound/core/seq/seq_prioq.h index d38bb78d9345..2c315ca10fc4 100644 --- a/sound/core/seq/seq_prioq.h +++ b/sound/core/seq/seq_prioq.h | |||
@@ -44,14 +44,12 @@ void snd_seq_prioq_delete(struct snd_seq_prioq **fifo); | |||
44 | int snd_seq_prioq_cell_in(struct snd_seq_prioq *f, struct snd_seq_event_cell *cell); | 44 | int snd_seq_prioq_cell_in(struct snd_seq_prioq *f, struct snd_seq_event_cell *cell); |
45 | 45 | ||
46 | /* dequeue cell from prioq */ | 46 | /* dequeue cell from prioq */ |
47 | struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f); | 47 | struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f, |
48 | void *current_time); | ||
48 | 49 | ||
49 | /* return number of events available in prioq */ | 50 | /* return number of events available in prioq */ |
50 | int snd_seq_prioq_avail(struct snd_seq_prioq *f); | 51 | int snd_seq_prioq_avail(struct snd_seq_prioq *f); |
51 | 52 | ||
52 | /* peek at cell at the head of the prioq */ | ||
53 | struct snd_seq_event_cell *snd_seq_prioq_cell_peek(struct snd_seq_prioq *f); | ||
54 | |||
55 | /* client left queue */ | 53 | /* client left queue */ |
56 | void snd_seq_prioq_leave(struct snd_seq_prioq *f, int client, int timestamp); | 54 | void snd_seq_prioq_leave(struct snd_seq_prioq *f, int client, int timestamp); |
57 | 55 | ||
diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c index 0428e9061b47..b377f5048352 100644 --- a/sound/core/seq/seq_queue.c +++ b/sound/core/seq/seq_queue.c | |||
@@ -277,30 +277,20 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop) | |||
277 | 277 | ||
278 | __again: | 278 | __again: |
279 | /* Process tick queue... */ | 279 | /* Process tick queue... */ |
280 | while ((cell = snd_seq_prioq_cell_peek(q->tickq)) != NULL) { | 280 | for (;;) { |
281 | if (snd_seq_compare_tick_time(&q->timer->tick.cur_tick, | 281 | cell = snd_seq_prioq_cell_out(q->tickq, |
282 | &cell->event.time.tick)) { | 282 | &q->timer->tick.cur_tick); |
283 | cell = snd_seq_prioq_cell_out(q->tickq); | 283 | if (!cell) |
284 | if (cell) | ||
285 | snd_seq_dispatch_event(cell, atomic, hop); | ||
286 | } else { | ||
287 | /* event remains in the queue */ | ||
288 | break; | 284 | break; |
289 | } | 285 | snd_seq_dispatch_event(cell, atomic, hop); |
290 | } | 286 | } |
291 | 287 | ||
292 | |||
293 | /* Process time queue... */ | 288 | /* Process time queue... */ |
294 | while ((cell = snd_seq_prioq_cell_peek(q->timeq)) != NULL) { | 289 | for (;;) { |
295 | if (snd_seq_compare_real_time(&q->timer->cur_time, | 290 | cell = snd_seq_prioq_cell_out(q->timeq, &q->timer->cur_time); |
296 | &cell->event.time.time)) { | 291 | if (!cell) |
297 | cell = snd_seq_prioq_cell_out(q->timeq); | ||
298 | if (cell) | ||
299 | snd_seq_dispatch_event(cell, atomic, hop); | ||
300 | } else { | ||
301 | /* event remains in the queue */ | ||
302 | break; | 292 | break; |
303 | } | 293 | snd_seq_dispatch_event(cell, atomic, hop); |
304 | } | 294 | } |
305 | 295 | ||
306 | /* free lock */ | 296 | /* free lock */ |
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index c71dcacea807..d5017adf9feb 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
@@ -186,6 +186,10 @@ module_param(power_save, xint, 0644); | |||
186 | MODULE_PARM_DESC(power_save, "Automatic power-saving timeout " | 186 | MODULE_PARM_DESC(power_save, "Automatic power-saving timeout " |
187 | "(in second, 0 = disable)."); | 187 | "(in second, 0 = disable)."); |
188 | 188 | ||
189 | static bool pm_blacklist = true; | ||
190 | module_param(pm_blacklist, bool, 0644); | ||
191 | MODULE_PARM_DESC(pm_blacklist, "Enable power-management blacklist"); | ||
192 | |||
189 | /* reset the HD-audio controller in power save mode. | 193 | /* reset the HD-audio controller in power save mode. |
190 | * this may give more power-saving, but will take longer time to | 194 | * this may give more power-saving, but will take longer time to |
191 | * wake up. | 195 | * wake up. |
@@ -2186,6 +2190,24 @@ out_free: | |||
2186 | return err; | 2190 | return err; |
2187 | } | 2191 | } |
2188 | 2192 | ||
2193 | #ifdef CONFIG_PM | ||
2194 | /* On some boards setting power_save to a non 0 value leads to clicking / | ||
2195 | * popping sounds when ever we enter/leave powersaving mode. Ideally we would | ||
2196 | * figure out how to avoid these sounds, but that is not always feasible. | ||
2197 | * So we keep a list of devices where we disable powersaving as its known | ||
2198 | * to causes problems on these devices. | ||
2199 | */ | ||
2200 | static struct snd_pci_quirk power_save_blacklist[] = { | ||
2201 | /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */ | ||
2202 | SND_PCI_QUIRK(0x1849, 0x0c0c, "Asrock B85M-ITX", 0), | ||
2203 | /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */ | ||
2204 | SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0), | ||
2205 | /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */ | ||
2206 | SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0), | ||
2207 | {} | ||
2208 | }; | ||
2209 | #endif /* CONFIG_PM */ | ||
2210 | |||
2189 | /* number of codec slots for each chipset: 0 = default slots (i.e. 4) */ | 2211 | /* number of codec slots for each chipset: 0 = default slots (i.e. 4) */ |
2190 | static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] = { | 2212 | static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] = { |
2191 | [AZX_DRIVER_NVIDIA] = 8, | 2213 | [AZX_DRIVER_NVIDIA] = 8, |
@@ -2198,6 +2220,7 @@ static int azx_probe_continue(struct azx *chip) | |||
2198 | struct hdac_bus *bus = azx_bus(chip); | 2220 | struct hdac_bus *bus = azx_bus(chip); |
2199 | struct pci_dev *pci = chip->pci; | 2221 | struct pci_dev *pci = chip->pci; |
2200 | int dev = chip->dev_index; | 2222 | int dev = chip->dev_index; |
2223 | int val; | ||
2201 | int err; | 2224 | int err; |
2202 | 2225 | ||
2203 | hda->probe_continued = 1; | 2226 | hda->probe_continued = 1; |
@@ -2278,7 +2301,21 @@ static int azx_probe_continue(struct azx *chip) | |||
2278 | 2301 | ||
2279 | chip->running = 1; | 2302 | chip->running = 1; |
2280 | azx_add_card_list(chip); | 2303 | azx_add_card_list(chip); |
2281 | snd_hda_set_power_save(&chip->bus, power_save * 1000); | 2304 | |
2305 | val = power_save; | ||
2306 | #ifdef CONFIG_PM | ||
2307 | if (pm_blacklist) { | ||
2308 | const struct snd_pci_quirk *q; | ||
2309 | |||
2310 | q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist); | ||
2311 | if (q && val) { | ||
2312 | dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n", | ||
2313 | q->subvendor, q->subdevice); | ||
2314 | val = 0; | ||
2315 | } | ||
2316 | } | ||
2317 | #endif /* CONFIG_PM */ | ||
2318 | snd_hda_set_power_save(&chip->bus, val * 1000); | ||
2282 | if (azx_has_pm_runtime(chip) || hda->use_vga_switcheroo) | 2319 | if (azx_has_pm_runtime(chip) || hda->use_vga_switcheroo) |
2283 | pm_runtime_put_autosuspend(&pci->dev); | 2320 | pm_runtime_put_autosuspend(&pci->dev); |
2284 | 2321 | ||
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 37e1cf8218ff..5b4dbcec6de8 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
@@ -957,6 +957,8 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { | |||
957 | SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC), | 957 | SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC), |
958 | SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC), | 958 | SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC), |
959 | SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK), | 959 | SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK), |
960 | SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK), | ||
961 | SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK), | ||
960 | SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE), | 962 | SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE), |
961 | SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC), | 963 | SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC), |
962 | SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO), | 964 | SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO), |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index ce28f7ce64e6..9af301c6bba2 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -4997,13 +4997,14 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec, | |||
4997 | 4997 | ||
4998 | if (action == HDA_FIXUP_ACT_PRE_PROBE) { | 4998 | if (action == HDA_FIXUP_ACT_PRE_PROBE) { |
4999 | spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP; | 4999 | spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP; |
5000 | snd_hda_apply_pincfgs(codec, pincfgs); | ||
5001 | } else if (action == HDA_FIXUP_ACT_INIT) { | ||
5000 | /* Enable DOCK device */ | 5002 | /* Enable DOCK device */ |
5001 | snd_hda_codec_write(codec, 0x17, 0, | 5003 | snd_hda_codec_write(codec, 0x17, 0, |
5002 | AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0); | 5004 | AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0); |
5003 | /* Enable DOCK device */ | 5005 | /* Enable DOCK device */ |
5004 | snd_hda_codec_write(codec, 0x19, 0, | 5006 | snd_hda_codec_write(codec, 0x19, 0, |
5005 | AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0); | 5007 | AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0); |
5006 | snd_hda_apply_pincfgs(codec, pincfgs); | ||
5007 | } | 5008 | } |
5008 | } | 5009 | } |
5009 | 5010 | ||
@@ -5273,6 +5274,16 @@ static void alc298_fixup_speaker_volume(struct hda_codec *codec, | |||
5273 | } | 5274 | } |
5274 | } | 5275 | } |
5275 | 5276 | ||
5277 | /* disable DAC3 (0x06) selection on NID 0x17 as it has no volume amp control */ | ||
5278 | static void alc295_fixup_disable_dac3(struct hda_codec *codec, | ||
5279 | const struct hda_fixup *fix, int action) | ||
5280 | { | ||
5281 | if (action == HDA_FIXUP_ACT_PRE_PROBE) { | ||
5282 | hda_nid_t conn[2] = { 0x02, 0x03 }; | ||
5283 | snd_hda_override_conn_list(codec, 0x17, 2, conn); | ||
5284 | } | ||
5285 | } | ||
5286 | |||
5276 | /* Hook to update amp GPIO4 for automute */ | 5287 | /* Hook to update amp GPIO4 for automute */ |
5277 | static void alc280_hp_gpio4_automute_hook(struct hda_codec *codec, | 5288 | static void alc280_hp_gpio4_automute_hook(struct hda_codec *codec, |
5278 | struct hda_jack_callback *jack) | 5289 | struct hda_jack_callback *jack) |
@@ -5465,6 +5476,7 @@ enum { | |||
5465 | ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, | 5476 | ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, |
5466 | ALC255_FIXUP_DELL_SPK_NOISE, | 5477 | ALC255_FIXUP_DELL_SPK_NOISE, |
5467 | ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, | 5478 | ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, |
5479 | ALC295_FIXUP_DISABLE_DAC3, | ||
5468 | ALC280_FIXUP_HP_HEADSET_MIC, | 5480 | ALC280_FIXUP_HP_HEADSET_MIC, |
5469 | ALC221_FIXUP_HP_FRONT_MIC, | 5481 | ALC221_FIXUP_HP_FRONT_MIC, |
5470 | ALC292_FIXUP_TPT460, | 5482 | ALC292_FIXUP_TPT460, |
@@ -5479,10 +5491,12 @@ enum { | |||
5479 | ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE, | 5491 | ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE, |
5480 | ALC233_FIXUP_LENOVO_MULTI_CODECS, | 5492 | ALC233_FIXUP_LENOVO_MULTI_CODECS, |
5481 | ALC294_FIXUP_LENOVO_MIC_LOCATION, | 5493 | ALC294_FIXUP_LENOVO_MIC_LOCATION, |
5494 | ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE, | ||
5482 | ALC700_FIXUP_INTEL_REFERENCE, | 5495 | ALC700_FIXUP_INTEL_REFERENCE, |
5483 | ALC274_FIXUP_DELL_BIND_DACS, | 5496 | ALC274_FIXUP_DELL_BIND_DACS, |
5484 | ALC274_FIXUP_DELL_AIO_LINEOUT_VERB, | 5497 | ALC274_FIXUP_DELL_AIO_LINEOUT_VERB, |
5485 | ALC298_FIXUP_TPT470_DOCK, | 5498 | ALC298_FIXUP_TPT470_DOCK, |
5499 | ALC255_FIXUP_DUMMY_LINEOUT_VERB, | ||
5486 | }; | 5500 | }; |
5487 | 5501 | ||
5488 | static const struct hda_fixup alc269_fixups[] = { | 5502 | static const struct hda_fixup alc269_fixups[] = { |
@@ -6197,6 +6211,10 @@ static const struct hda_fixup alc269_fixups[] = { | |||
6197 | .chained = true, | 6211 | .chained = true, |
6198 | .chain_id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE, | 6212 | .chain_id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE, |
6199 | }, | 6213 | }, |
6214 | [ALC295_FIXUP_DISABLE_DAC3] = { | ||
6215 | .type = HDA_FIXUP_FUNC, | ||
6216 | .v.func = alc295_fixup_disable_dac3, | ||
6217 | }, | ||
6200 | [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = { | 6218 | [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = { |
6201 | .type = HDA_FIXUP_PINS, | 6219 | .type = HDA_FIXUP_PINS, |
6202 | .v.pins = (const struct hda_pintbl[]) { | 6220 | .v.pins = (const struct hda_pintbl[]) { |
@@ -6282,6 +6300,18 @@ static const struct hda_fixup alc269_fixups[] = { | |||
6282 | { } | 6300 | { } |
6283 | }, | 6301 | }, |
6284 | }, | 6302 | }, |
6303 | [ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE] = { | ||
6304 | .type = HDA_FIXUP_PINS, | ||
6305 | .v.pins = (const struct hda_pintbl[]) { | ||
6306 | { 0x16, 0x0101102f }, /* Rear Headset HP */ | ||
6307 | { 0x19, 0x02a1913c }, /* use as Front headset mic, without its own jack detect */ | ||
6308 | { 0x1a, 0x01a19030 }, /* Rear Headset MIC */ | ||
6309 | { 0x1b, 0x02011020 }, | ||
6310 | { } | ||
6311 | }, | ||
6312 | .chained = true, | ||
6313 | .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC | ||
6314 | }, | ||
6285 | [ALC700_FIXUP_INTEL_REFERENCE] = { | 6315 | [ALC700_FIXUP_INTEL_REFERENCE] = { |
6286 | .type = HDA_FIXUP_VERBS, | 6316 | .type = HDA_FIXUP_VERBS, |
6287 | .v.verbs = (const struct hda_verb[]) { | 6317 | .v.verbs = (const struct hda_verb[]) { |
@@ -6318,6 +6348,15 @@ static const struct hda_fixup alc269_fixups[] = { | |||
6318 | .chained = true, | 6348 | .chained = true, |
6319 | .chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE | 6349 | .chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE |
6320 | }, | 6350 | }, |
6351 | [ALC255_FIXUP_DUMMY_LINEOUT_VERB] = { | ||
6352 | .type = HDA_FIXUP_PINS, | ||
6353 | .v.pins = (const struct hda_pintbl[]) { | ||
6354 | { 0x14, 0x0201101f }, | ||
6355 | { } | ||
6356 | }, | ||
6357 | .chained = true, | ||
6358 | .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE | ||
6359 | }, | ||
6321 | }; | 6360 | }; |
6322 | 6361 | ||
6323 | static const struct snd_pci_quirk alc269_fixup_tbl[] = { | 6362 | static const struct snd_pci_quirk alc269_fixup_tbl[] = { |
@@ -6366,10 +6405,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
6366 | SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE), | 6405 | SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE), |
6367 | SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), | 6406 | SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), |
6368 | SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME), | 6407 | SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME), |
6408 | SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3), | ||
6369 | SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER), | 6409 | SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER), |
6410 | SND_PCI_QUIRK(0x1028, 0x080c, "Dell WYSE", ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE), | ||
6370 | SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), | 6411 | SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), |
6371 | SND_PCI_QUIRK(0x1028, 0x084b, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB), | 6412 | SND_PCI_QUIRK(0x1028, 0x084b, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB), |
6372 | SND_PCI_QUIRK(0x1028, 0x084e, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB), | 6413 | SND_PCI_QUIRK(0x1028, 0x084e, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB), |
6414 | SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB), | ||
6373 | SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), | 6415 | SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), |
6374 | SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), | 6416 | SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), |
6375 | SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), | 6417 | SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), |
@@ -6507,9 +6549,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
6507 | SND_PCI_QUIRK(0x17aa, 0x2245, "Thinkpad T470", ALC298_FIXUP_TPT470_DOCK), | 6549 | SND_PCI_QUIRK(0x17aa, 0x2245, "Thinkpad T470", ALC298_FIXUP_TPT470_DOCK), |
6508 | SND_PCI_QUIRK(0x17aa, 0x2246, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), | 6550 | SND_PCI_QUIRK(0x17aa, 0x2246, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), |
6509 | SND_PCI_QUIRK(0x17aa, 0x2247, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), | 6551 | SND_PCI_QUIRK(0x17aa, 0x2247, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), |
6552 | SND_PCI_QUIRK(0x17aa, 0x2249, "Thinkpad", ALC292_FIXUP_TPT460), | ||
6510 | SND_PCI_QUIRK(0x17aa, 0x224b, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), | 6553 | SND_PCI_QUIRK(0x17aa, 0x224b, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), |
6511 | SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), | 6554 | SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), |
6512 | SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), | 6555 | SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), |
6556 | SND_PCI_QUIRK(0x17aa, 0x225d, "Thinkpad T480", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), | ||
6513 | SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), | 6557 | SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), |
6514 | SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), | 6558 | SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), |
6515 | SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), | 6559 | SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), |
@@ -6871,7 +6915,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { | |||
6871 | {0x12, 0x90a60120}, | 6915 | {0x12, 0x90a60120}, |
6872 | {0x14, 0x90170110}, | 6916 | {0x14, 0x90170110}, |
6873 | {0x21, 0x0321101f}), | 6917 | {0x21, 0x0321101f}), |
6874 | SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, | 6918 | SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE, |
6875 | {0x12, 0xb7a60130}, | 6919 | {0x12, 0xb7a60130}, |
6876 | {0x14, 0x90170110}, | 6920 | {0x14, 0x90170110}, |
6877 | {0x21, 0x04211020}), | 6921 | {0x21, 0x04211020}), |
diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c index c33a512283a4..9fb356db3ab2 100644 --- a/sound/soc/amd/acp-pcm-dma.c +++ b/sound/soc/amd/acp-pcm-dma.c | |||
@@ -579,13 +579,6 @@ static int acp_init(void __iomem *acp_mmio, u32 asic_type) | |||
579 | for (bank = 1; bank < 48; bank++) | 579 | for (bank = 1; bank < 48; bank++) |
580 | acp_set_sram_bank_state(acp_mmio, bank, false); | 580 | acp_set_sram_bank_state(acp_mmio, bank, false); |
581 | } | 581 | } |
582 | |||
583 | /* Stoney supports 16bit resolution */ | ||
584 | if (asic_type == CHIP_STONEY) { | ||
585 | val = acp_reg_read(acp_mmio, mmACP_I2S_16BIT_RESOLUTION_EN); | ||
586 | val |= 0x03; | ||
587 | acp_reg_write(val, acp_mmio, mmACP_I2S_16BIT_RESOLUTION_EN); | ||
588 | } | ||
589 | return 0; | 582 | return 0; |
590 | } | 583 | } |
591 | 584 | ||
@@ -774,6 +767,7 @@ static int acp_dma_hw_params(struct snd_pcm_substream *substream, | |||
774 | { | 767 | { |
775 | int status; | 768 | int status; |
776 | uint64_t size; | 769 | uint64_t size; |
770 | u32 val = 0; | ||
777 | struct page *pg; | 771 | struct page *pg; |
778 | struct snd_pcm_runtime *runtime; | 772 | struct snd_pcm_runtime *runtime; |
779 | struct audio_substream_data *rtd; | 773 | struct audio_substream_data *rtd; |
@@ -786,6 +780,14 @@ static int acp_dma_hw_params(struct snd_pcm_substream *substream, | |||
786 | if (WARN_ON(!rtd)) | 780 | if (WARN_ON(!rtd)) |
787 | return -EINVAL; | 781 | return -EINVAL; |
788 | 782 | ||
783 | if (adata->asic_type == CHIP_STONEY) { | ||
784 | val = acp_reg_read(adata->acp_mmio, mmACP_I2S_16BIT_RESOLUTION_EN); | ||
785 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) | ||
786 | val |= ACP_I2S_SP_16BIT_RESOLUTION_EN; | ||
787 | else | ||
788 | val |= ACP_I2S_MIC_16BIT_RESOLUTION_EN; | ||
789 | acp_reg_write(val, adata->acp_mmio, mmACP_I2S_16BIT_RESOLUTION_EN); | ||
790 | } | ||
789 | size = params_buffer_bytes(params); | 791 | size = params_buffer_bytes(params); |
790 | status = snd_pcm_lib_malloc_pages(substream, size); | 792 | status = snd_pcm_lib_malloc_pages(substream, size); |
791 | if (status < 0) | 793 | if (status < 0) |
diff --git a/sound/soc/amd/acp.h b/sound/soc/amd/acp.h index ecb458935d1e..9293f179f272 100644 --- a/sound/soc/amd/acp.h +++ b/sound/soc/amd/acp.h | |||
@@ -70,6 +70,8 @@ | |||
70 | #define CAPTURE_END_DMA_DESCR_CH15 7 | 70 | #define CAPTURE_END_DMA_DESCR_CH15 7 |
71 | 71 | ||
72 | #define mmACP_I2S_16BIT_RESOLUTION_EN 0x5209 | 72 | #define mmACP_I2S_16BIT_RESOLUTION_EN 0x5209 |
73 | #define ACP_I2S_MIC_16BIT_RESOLUTION_EN 0x01 | ||
74 | #define ACP_I2S_SP_16BIT_RESOLUTION_EN 0x02 | ||
73 | enum acp_dma_priority_level { | 75 | enum acp_dma_priority_level { |
74 | /* 0x0 Specifies the DMA channel is given normal priority */ | 76 | /* 0x0 Specifies the DMA channel is given normal priority */ |
75 | ACP_DMA_PRIORITY_LEVEL_NORMAL = 0x0, | 77 | ACP_DMA_PRIORITY_LEVEL_NORMAL = 0x0, |
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c index 5672e516bec3..c1830ccd3bb8 100644 --- a/sound/soc/codecs/hdmi-codec.c +++ b/sound/soc/codecs/hdmi-codec.c | |||
@@ -798,12 +798,7 @@ static int hdmi_codec_probe(struct platform_device *pdev) | |||
798 | 798 | ||
799 | static int hdmi_codec_remove(struct platform_device *pdev) | 799 | static int hdmi_codec_remove(struct platform_device *pdev) |
800 | { | 800 | { |
801 | struct device *dev = &pdev->dev; | 801 | snd_soc_unregister_codec(&pdev->dev); |
802 | struct hdmi_codec_priv *hcp; | ||
803 | |||
804 | hcp = dev_get_drvdata(dev); | ||
805 | kfree(hcp->chmap_info); | ||
806 | snd_soc_unregister_codec(dev); | ||
807 | 802 | ||
808 | return 0; | 803 | return 0; |
809 | } | 804 | } |
diff --git a/sound/soc/codecs/rt5651.c b/sound/soc/codecs/rt5651.c index 831b297978a4..45a73049cf64 100644 --- a/sound/soc/codecs/rt5651.c +++ b/sound/soc/codecs/rt5651.c | |||
@@ -1722,6 +1722,7 @@ static const struct regmap_config rt5651_regmap = { | |||
1722 | .num_reg_defaults = ARRAY_SIZE(rt5651_reg), | 1722 | .num_reg_defaults = ARRAY_SIZE(rt5651_reg), |
1723 | .ranges = rt5651_ranges, | 1723 | .ranges = rt5651_ranges, |
1724 | .num_ranges = ARRAY_SIZE(rt5651_ranges), | 1724 | .num_ranges = ARRAY_SIZE(rt5651_ranges), |
1725 | .use_single_rw = true, | ||
1725 | }; | 1726 | }; |
1726 | 1727 | ||
1727 | #if defined(CONFIG_OF) | 1728 | #if defined(CONFIG_OF) |
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c index e1ab5537d27a..c5c76ab8ccf1 100644 --- a/sound/soc/codecs/sgtl5000.c +++ b/sound/soc/codecs/sgtl5000.c | |||
@@ -529,10 +529,15 @@ static const struct snd_kcontrol_new sgtl5000_snd_controls[] = { | |||
529 | static int sgtl5000_digital_mute(struct snd_soc_dai *codec_dai, int mute) | 529 | static int sgtl5000_digital_mute(struct snd_soc_dai *codec_dai, int mute) |
530 | { | 530 | { |
531 | struct snd_soc_codec *codec = codec_dai->codec; | 531 | struct snd_soc_codec *codec = codec_dai->codec; |
532 | u16 adcdac_ctrl = SGTL5000_DAC_MUTE_LEFT | SGTL5000_DAC_MUTE_RIGHT; | 532 | u16 i2s_pwr = SGTL5000_I2S_IN_POWERUP; |
533 | 533 | ||
534 | snd_soc_update_bits(codec, SGTL5000_CHIP_ADCDAC_CTRL, | 534 | /* |
535 | adcdac_ctrl, mute ? adcdac_ctrl : 0); | 535 | * During 'digital mute' do not mute DAC |
536 | * because LINE_IN would be muted aswell. We want to mute | ||
537 | * only I2S block - this can be done by powering it off | ||
538 | */ | ||
539 | snd_soc_update_bits(codec, SGTL5000_CHIP_DIG_POWER, | ||
540 | i2s_pwr, mute ? 0 : i2s_pwr); | ||
536 | 541 | ||
537 | return 0; | 542 | return 0; |
538 | } | 543 | } |
@@ -871,15 +876,26 @@ static int sgtl5000_pcm_hw_params(struct snd_pcm_substream *substream, | |||
871 | static int sgtl5000_set_bias_level(struct snd_soc_codec *codec, | 876 | static int sgtl5000_set_bias_level(struct snd_soc_codec *codec, |
872 | enum snd_soc_bias_level level) | 877 | enum snd_soc_bias_level level) |
873 | { | 878 | { |
879 | struct sgtl5000_priv *sgtl = snd_soc_codec_get_drvdata(codec); | ||
880 | int ret; | ||
881 | |||
874 | switch (level) { | 882 | switch (level) { |
875 | case SND_SOC_BIAS_ON: | 883 | case SND_SOC_BIAS_ON: |
876 | case SND_SOC_BIAS_PREPARE: | 884 | case SND_SOC_BIAS_PREPARE: |
877 | case SND_SOC_BIAS_STANDBY: | 885 | case SND_SOC_BIAS_STANDBY: |
886 | regcache_cache_only(sgtl->regmap, false); | ||
887 | ret = regcache_sync(sgtl->regmap); | ||
888 | if (ret) { | ||
889 | regcache_cache_only(sgtl->regmap, true); | ||
890 | return ret; | ||
891 | } | ||
892 | |||
878 | snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER, | 893 | snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER, |
879 | SGTL5000_REFTOP_POWERUP, | 894 | SGTL5000_REFTOP_POWERUP, |
880 | SGTL5000_REFTOP_POWERUP); | 895 | SGTL5000_REFTOP_POWERUP); |
881 | break; | 896 | break; |
882 | case SND_SOC_BIAS_OFF: | 897 | case SND_SOC_BIAS_OFF: |
898 | regcache_cache_only(sgtl->regmap, true); | ||
883 | snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER, | 899 | snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER, |
884 | SGTL5000_REFTOP_POWERUP, 0); | 900 | SGTL5000_REFTOP_POWERUP, 0); |
885 | break; | 901 | break; |
@@ -1237,6 +1253,10 @@ static int sgtl5000_probe(struct snd_soc_codec *codec) | |||
1237 | */ | 1253 | */ |
1238 | snd_soc_write(codec, SGTL5000_DAP_CTRL, 0); | 1254 | snd_soc_write(codec, SGTL5000_DAP_CTRL, 0); |
1239 | 1255 | ||
1256 | /* Unmute DAC after start */ | ||
1257 | snd_soc_update_bits(codec, SGTL5000_CHIP_ADCDAC_CTRL, | ||
1258 | SGTL5000_DAC_MUTE_LEFT | SGTL5000_DAC_MUTE_RIGHT, 0); | ||
1259 | |||
1240 | return 0; | 1260 | return 0; |
1241 | 1261 | ||
1242 | err: | 1262 | err: |
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c index 66e32f5d2917..989d093abda7 100644 --- a/sound/soc/codecs/wm_adsp.c +++ b/sound/soc/codecs/wm_adsp.c | |||
@@ -1204,12 +1204,14 @@ static int wmfw_add_ctl(struct wm_adsp *dsp, struct wm_coeff_ctl *ctl) | |||
1204 | kcontrol->put = wm_coeff_put_acked; | 1204 | kcontrol->put = wm_coeff_put_acked; |
1205 | break; | 1205 | break; |
1206 | default: | 1206 | default: |
1207 | kcontrol->get = wm_coeff_get; | 1207 | if (kcontrol->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) { |
1208 | kcontrol->put = wm_coeff_put; | 1208 | ctl->bytes_ext.max = ctl->len; |
1209 | 1209 | ctl->bytes_ext.get = wm_coeff_tlv_get; | |
1210 | ctl->bytes_ext.max = ctl->len; | 1210 | ctl->bytes_ext.put = wm_coeff_tlv_put; |
1211 | ctl->bytes_ext.get = wm_coeff_tlv_get; | 1211 | } else { |
1212 | ctl->bytes_ext.put = wm_coeff_tlv_put; | 1212 | kcontrol->get = wm_coeff_get; |
1213 | kcontrol->put = wm_coeff_put; | ||
1214 | } | ||
1213 | break; | 1215 | break; |
1214 | } | 1216 | } |
1215 | 1217 | ||
diff --git a/sound/soc/sunxi/sun4i-i2s.c b/sound/soc/sunxi/sun4i-i2s.c index dca1143c1150..a4aa931ebfae 100644 --- a/sound/soc/sunxi/sun4i-i2s.c +++ b/sound/soc/sunxi/sun4i-i2s.c | |||
@@ -104,7 +104,7 @@ | |||
104 | 104 | ||
105 | #define SUN8I_I2S_CHAN_CFG_REG 0x30 | 105 | #define SUN8I_I2S_CHAN_CFG_REG 0x30 |
106 | #define SUN8I_I2S_CHAN_CFG_RX_SLOT_NUM_MASK GENMASK(6, 4) | 106 | #define SUN8I_I2S_CHAN_CFG_RX_SLOT_NUM_MASK GENMASK(6, 4) |
107 | #define SUN8I_I2S_CHAN_CFG_RX_SLOT_NUM(chan) (chan - 1) | 107 | #define SUN8I_I2S_CHAN_CFG_RX_SLOT_NUM(chan) ((chan - 1) << 4) |
108 | #define SUN8I_I2S_CHAN_CFG_TX_SLOT_NUM_MASK GENMASK(2, 0) | 108 | #define SUN8I_I2S_CHAN_CFG_TX_SLOT_NUM_MASK GENMASK(2, 0) |
109 | #define SUN8I_I2S_CHAN_CFG_TX_SLOT_NUM(chan) (chan - 1) | 109 | #define SUN8I_I2S_CHAN_CFG_TX_SLOT_NUM(chan) (chan - 1) |
110 | 110 | ||
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 50252046b01d..754e632a27bd 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h | |||
@@ -3325,4 +3325,51 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"), | |||
3325 | } | 3325 | } |
3326 | }, | 3326 | }, |
3327 | 3327 | ||
3328 | { | ||
3329 | /* | ||
3330 | * Bower's & Wilkins PX headphones only support the 48 kHz sample rate | ||
3331 | * even though it advertises more. The capture interface doesn't work | ||
3332 | * even on windows. | ||
3333 | */ | ||
3334 | USB_DEVICE(0x19b5, 0x0021), | ||
3335 | .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { | ||
3336 | .ifnum = QUIRK_ANY_INTERFACE, | ||
3337 | .type = QUIRK_COMPOSITE, | ||
3338 | .data = (const struct snd_usb_audio_quirk[]) { | ||
3339 | { | ||
3340 | .ifnum = 0, | ||
3341 | .type = QUIRK_AUDIO_STANDARD_MIXER, | ||
3342 | }, | ||
3343 | /* Capture */ | ||
3344 | { | ||
3345 | .ifnum = 1, | ||
3346 | .type = QUIRK_IGNORE_INTERFACE, | ||
3347 | }, | ||
3348 | /* Playback */ | ||
3349 | { | ||
3350 | .ifnum = 2, | ||
3351 | .type = QUIRK_AUDIO_FIXED_ENDPOINT, | ||
3352 | .data = &(const struct audioformat) { | ||
3353 | .formats = SNDRV_PCM_FMTBIT_S16_LE, | ||
3354 | .channels = 2, | ||
3355 | .iface = 2, | ||
3356 | .altsetting = 1, | ||
3357 | .altset_idx = 1, | ||
3358 | .attributes = UAC_EP_CS_ATTR_FILL_MAX | | ||
3359 | UAC_EP_CS_ATTR_SAMPLE_RATE, | ||
3360 | .endpoint = 0x03, | ||
3361 | .ep_attr = USB_ENDPOINT_XFER_ISOC, | ||
3362 | .rates = SNDRV_PCM_RATE_48000, | ||
3363 | .rate_min = 48000, | ||
3364 | .rate_max = 48000, | ||
3365 | .nr_rates = 1, | ||
3366 | .rate_table = (unsigned int[]) { | ||
3367 | 48000 | ||
3368 | } | ||
3369 | } | ||
3370 | }, | ||
3371 | } | ||
3372 | } | ||
3373 | }, | ||
3374 | |||
3328 | #undef USB_DEVICE_VENDOR_SPEC | 3375 | #undef USB_DEVICE_VENDOR_SPEC |
diff --git a/sound/x86/intel_hdmi_audio.c b/sound/x86/intel_hdmi_audio.c index a0951505c7f5..4ed9d0c41843 100644 --- a/sound/x86/intel_hdmi_audio.c +++ b/sound/x86/intel_hdmi_audio.c | |||
@@ -50,6 +50,7 @@ | |||
50 | /*standard module options for ALSA. This module supports only one card*/ | 50 | /*standard module options for ALSA. This module supports only one card*/ |
51 | static int hdmi_card_index = SNDRV_DEFAULT_IDX1; | 51 | static int hdmi_card_index = SNDRV_DEFAULT_IDX1; |
52 | static char *hdmi_card_id = SNDRV_DEFAULT_STR1; | 52 | static char *hdmi_card_id = SNDRV_DEFAULT_STR1; |
53 | static bool single_port; | ||
53 | 54 | ||
54 | module_param_named(index, hdmi_card_index, int, 0444); | 55 | module_param_named(index, hdmi_card_index, int, 0444); |
55 | MODULE_PARM_DESC(index, | 56 | MODULE_PARM_DESC(index, |
@@ -57,6 +58,9 @@ MODULE_PARM_DESC(index, | |||
57 | module_param_named(id, hdmi_card_id, charp, 0444); | 58 | module_param_named(id, hdmi_card_id, charp, 0444); |
58 | MODULE_PARM_DESC(id, | 59 | MODULE_PARM_DESC(id, |
59 | "ID string for INTEL Intel HDMI Audio controller."); | 60 | "ID string for INTEL Intel HDMI Audio controller."); |
61 | module_param(single_port, bool, 0444); | ||
62 | MODULE_PARM_DESC(single_port, | ||
63 | "Single-port mode (for compatibility)"); | ||
60 | 64 | ||
61 | /* | 65 | /* |
62 | * ELD SA bits in the CEA Speaker Allocation data block | 66 | * ELD SA bits in the CEA Speaker Allocation data block |
@@ -1579,7 +1583,11 @@ static irqreturn_t display_pipe_interrupt_handler(int irq, void *dev_id) | |||
1579 | static void notify_audio_lpe(struct platform_device *pdev, int port) | 1583 | static void notify_audio_lpe(struct platform_device *pdev, int port) |
1580 | { | 1584 | { |
1581 | struct snd_intelhad_card *card_ctx = platform_get_drvdata(pdev); | 1585 | struct snd_intelhad_card *card_ctx = platform_get_drvdata(pdev); |
1582 | struct snd_intelhad *ctx = &card_ctx->pcm_ctx[port]; | 1586 | struct snd_intelhad *ctx; |
1587 | |||
1588 | ctx = &card_ctx->pcm_ctx[single_port ? 0 : port]; | ||
1589 | if (single_port) | ||
1590 | ctx->port = port; | ||
1583 | 1591 | ||
1584 | schedule_work(&ctx->hdmi_audio_wq); | 1592 | schedule_work(&ctx->hdmi_audio_wq); |
1585 | } | 1593 | } |
@@ -1743,6 +1751,7 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev) | |||
1743 | { | 1751 | { |
1744 | struct snd_card *card; | 1752 | struct snd_card *card; |
1745 | struct snd_intelhad_card *card_ctx; | 1753 | struct snd_intelhad_card *card_ctx; |
1754 | struct snd_intelhad *ctx; | ||
1746 | struct snd_pcm *pcm; | 1755 | struct snd_pcm *pcm; |
1747 | struct intel_hdmi_lpe_audio_pdata *pdata; | 1756 | struct intel_hdmi_lpe_audio_pdata *pdata; |
1748 | int irq; | 1757 | int irq; |
@@ -1787,6 +1796,21 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev) | |||
1787 | 1796 | ||
1788 | platform_set_drvdata(pdev, card_ctx); | 1797 | platform_set_drvdata(pdev, card_ctx); |
1789 | 1798 | ||
1799 | card_ctx->num_pipes = pdata->num_pipes; | ||
1800 | card_ctx->num_ports = single_port ? 1 : pdata->num_ports; | ||
1801 | |||
1802 | for_each_port(card_ctx, port) { | ||
1803 | ctx = &card_ctx->pcm_ctx[port]; | ||
1804 | ctx->card_ctx = card_ctx; | ||
1805 | ctx->dev = card_ctx->dev; | ||
1806 | ctx->port = single_port ? -1 : port; | ||
1807 | ctx->pipe = -1; | ||
1808 | |||
1809 | spin_lock_init(&ctx->had_spinlock); | ||
1810 | mutex_init(&ctx->mutex); | ||
1811 | INIT_WORK(&ctx->hdmi_audio_wq, had_audio_wq); | ||
1812 | } | ||
1813 | |||
1790 | dev_dbg(&pdev->dev, "%s: mmio_start = 0x%x, mmio_end = 0x%x\n", | 1814 | dev_dbg(&pdev->dev, "%s: mmio_start = 0x%x, mmio_end = 0x%x\n", |
1791 | __func__, (unsigned int)res_mmio->start, | 1815 | __func__, (unsigned int)res_mmio->start, |
1792 | (unsigned int)res_mmio->end); | 1816 | (unsigned int)res_mmio->end); |
@@ -1816,19 +1840,12 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev) | |||
1816 | init_channel_allocations(); | 1840 | init_channel_allocations(); |
1817 | 1841 | ||
1818 | card_ctx->num_pipes = pdata->num_pipes; | 1842 | card_ctx->num_pipes = pdata->num_pipes; |
1819 | card_ctx->num_ports = pdata->num_ports; | 1843 | card_ctx->num_ports = single_port ? 1 : pdata->num_ports; |
1820 | 1844 | ||
1821 | for_each_port(card_ctx, port) { | 1845 | for_each_port(card_ctx, port) { |
1822 | struct snd_intelhad *ctx = &card_ctx->pcm_ctx[port]; | ||
1823 | int i; | 1846 | int i; |
1824 | 1847 | ||
1825 | ctx->card_ctx = card_ctx; | 1848 | ctx = &card_ctx->pcm_ctx[port]; |
1826 | ctx->dev = card_ctx->dev; | ||
1827 | ctx->port = port; | ||
1828 | ctx->pipe = -1; | ||
1829 | |||
1830 | INIT_WORK(&ctx->hdmi_audio_wq, had_audio_wq); | ||
1831 | |||
1832 | ret = snd_pcm_new(card, INTEL_HAD, port, MAX_PB_STREAMS, | 1849 | ret = snd_pcm_new(card, INTEL_HAD, port, MAX_PB_STREAMS, |
1833 | MAX_CAP_STREAMS, &pcm); | 1850 | MAX_CAP_STREAMS, &pcm); |
1834 | if (ret) | 1851 | if (ret) |
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index 0dfe4d3f74e2..f41079da38c5 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h | |||
@@ -213,6 +213,7 @@ | |||
213 | #define X86_FEATURE_SEV ( 7*32+20) /* AMD Secure Encrypted Virtualization */ | 213 | #define X86_FEATURE_SEV ( 7*32+20) /* AMD Secure Encrypted Virtualization */ |
214 | 214 | ||
215 | #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */ | 215 | #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */ |
216 | #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */ | ||
216 | 217 | ||
217 | /* Virtualization flags: Linux defined, word 8 */ | 218 | /* Virtualization flags: Linux defined, word 8 */ |
218 | #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ | 219 | #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ |
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h index 0fb5ef939732..7b26d4b0b052 100644 --- a/tools/include/uapi/linux/kvm.h +++ b/tools/include/uapi/linux/kvm.h | |||
@@ -761,6 +761,7 @@ struct kvm_ppc_resize_hpt { | |||
761 | #define KVM_TRACE_PAUSE __KVM_DEPRECATED_MAIN_0x07 | 761 | #define KVM_TRACE_PAUSE __KVM_DEPRECATED_MAIN_0x07 |
762 | #define KVM_TRACE_DISABLE __KVM_DEPRECATED_MAIN_0x08 | 762 | #define KVM_TRACE_DISABLE __KVM_DEPRECATED_MAIN_0x08 |
763 | #define KVM_GET_EMULATED_CPUID _IOWR(KVMIO, 0x09, struct kvm_cpuid2) | 763 | #define KVM_GET_EMULATED_CPUID _IOWR(KVMIO, 0x09, struct kvm_cpuid2) |
764 | #define KVM_GET_MSR_FEATURE_INDEX_LIST _IOWR(KVMIO, 0x0a, struct kvm_msr_list) | ||
764 | 765 | ||
765 | /* | 766 | /* |
766 | * Extension capability list. | 767 | * Extension capability list. |
@@ -934,6 +935,7 @@ struct kvm_ppc_resize_hpt { | |||
934 | #define KVM_CAP_S390_AIS_MIGRATION 150 | 935 | #define KVM_CAP_S390_AIS_MIGRATION 150 |
935 | #define KVM_CAP_PPC_GET_CPU_CHAR 151 | 936 | #define KVM_CAP_PPC_GET_CPU_CHAR 151 |
936 | #define KVM_CAP_S390_BPB 152 | 937 | #define KVM_CAP_S390_BPB 152 |
938 | #define KVM_CAP_GET_MSR_FEATURES 153 | ||
937 | 939 | ||
938 | #ifdef KVM_CAP_IRQ_ROUTING | 940 | #ifdef KVM_CAP_IRQ_ROUTING |
939 | 941 | ||
diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat index a5684d0968b4..5898c22ba310 100755 --- a/tools/kvm/kvm_stat/kvm_stat +++ b/tools/kvm/kvm_stat/kvm_stat | |||
@@ -33,7 +33,7 @@ import resource | |||
33 | import struct | 33 | import struct |
34 | import re | 34 | import re |
35 | import subprocess | 35 | import subprocess |
36 | from collections import defaultdict | 36 | from collections import defaultdict, namedtuple |
37 | 37 | ||
38 | VMX_EXIT_REASONS = { | 38 | VMX_EXIT_REASONS = { |
39 | 'EXCEPTION_NMI': 0, | 39 | 'EXCEPTION_NMI': 0, |
@@ -228,6 +228,7 @@ IOCTL_NUMBERS = { | |||
228 | } | 228 | } |
229 | 229 | ||
230 | ENCODING = locale.getpreferredencoding(False) | 230 | ENCODING = locale.getpreferredencoding(False) |
231 | TRACE_FILTER = re.compile(r'^[^\(]*$') | ||
231 | 232 | ||
232 | 233 | ||
233 | class Arch(object): | 234 | class Arch(object): |
@@ -260,6 +261,11 @@ class Arch(object): | |||
260 | return ArchX86(SVM_EXIT_REASONS) | 261 | return ArchX86(SVM_EXIT_REASONS) |
261 | return | 262 | return |
262 | 263 | ||
264 | def tracepoint_is_child(self, field): | ||
265 | if (TRACE_FILTER.match(field)): | ||
266 | return None | ||
267 | return field.split('(', 1)[0] | ||
268 | |||
263 | 269 | ||
264 | class ArchX86(Arch): | 270 | class ArchX86(Arch): |
265 | def __init__(self, exit_reasons): | 271 | def __init__(self, exit_reasons): |
@@ -267,6 +273,10 @@ class ArchX86(Arch): | |||
267 | self.ioctl_numbers = IOCTL_NUMBERS | 273 | self.ioctl_numbers = IOCTL_NUMBERS |
268 | self.exit_reasons = exit_reasons | 274 | self.exit_reasons = exit_reasons |
269 | 275 | ||
276 | def debugfs_is_child(self, field): | ||
277 | """ Returns name of parent if 'field' is a child, None otherwise """ | ||
278 | return None | ||
279 | |||
270 | 280 | ||
271 | class ArchPPC(Arch): | 281 | class ArchPPC(Arch): |
272 | def __init__(self): | 282 | def __init__(self): |
@@ -282,6 +292,10 @@ class ArchPPC(Arch): | |||
282 | self.ioctl_numbers['SET_FILTER'] = 0x80002406 | char_ptr_size << 16 | 292 | self.ioctl_numbers['SET_FILTER'] = 0x80002406 | char_ptr_size << 16 |
283 | self.exit_reasons = {} | 293 | self.exit_reasons = {} |
284 | 294 | ||
295 | def debugfs_is_child(self, field): | ||
296 | """ Returns name of parent if 'field' is a child, None otherwise """ | ||
297 | return None | ||
298 | |||
285 | 299 | ||
286 | class ArchA64(Arch): | 300 | class ArchA64(Arch): |
287 | def __init__(self): | 301 | def __init__(self): |
@@ -289,6 +303,10 @@ class ArchA64(Arch): | |||
289 | self.ioctl_numbers = IOCTL_NUMBERS | 303 | self.ioctl_numbers = IOCTL_NUMBERS |
290 | self.exit_reasons = AARCH64_EXIT_REASONS | 304 | self.exit_reasons = AARCH64_EXIT_REASONS |
291 | 305 | ||
306 | def debugfs_is_child(self, field): | ||
307 | """ Returns name of parent if 'field' is a child, None otherwise """ | ||
308 | return None | ||
309 | |||
292 | 310 | ||
293 | class ArchS390(Arch): | 311 | class ArchS390(Arch): |
294 | def __init__(self): | 312 | def __init__(self): |
@@ -296,6 +314,12 @@ class ArchS390(Arch): | |||
296 | self.ioctl_numbers = IOCTL_NUMBERS | 314 | self.ioctl_numbers = IOCTL_NUMBERS |
297 | self.exit_reasons = None | 315 | self.exit_reasons = None |
298 | 316 | ||
317 | def debugfs_is_child(self, field): | ||
318 | """ Returns name of parent if 'field' is a child, None otherwise """ | ||
319 | if field.startswith('instruction_'): | ||
320 | return 'exit_instruction' | ||
321 | |||
322 | |||
299 | ARCH = Arch.get_arch() | 323 | ARCH = Arch.get_arch() |
300 | 324 | ||
301 | 325 | ||
@@ -331,9 +355,6 @@ class perf_event_attr(ctypes.Structure): | |||
331 | PERF_TYPE_TRACEPOINT = 2 | 355 | PERF_TYPE_TRACEPOINT = 2 |
332 | PERF_FORMAT_GROUP = 1 << 3 | 356 | PERF_FORMAT_GROUP = 1 << 3 |
333 | 357 | ||
334 | PATH_DEBUGFS_TRACING = '/sys/kernel/debug/tracing' | ||
335 | PATH_DEBUGFS_KVM = '/sys/kernel/debug/kvm' | ||
336 | |||
337 | 358 | ||
338 | class Group(object): | 359 | class Group(object): |
339 | """Represents a perf event group.""" | 360 | """Represents a perf event group.""" |
@@ -376,8 +397,8 @@ class Event(object): | |||
376 | self.syscall = self.libc.syscall | 397 | self.syscall = self.libc.syscall |
377 | self.name = name | 398 | self.name = name |
378 | self.fd = None | 399 | self.fd = None |
379 | self.setup_event(group, trace_cpu, trace_pid, trace_point, | 400 | self._setup_event(group, trace_cpu, trace_pid, trace_point, |
380 | trace_filter, trace_set) | 401 | trace_filter, trace_set) |
381 | 402 | ||
382 | def __del__(self): | 403 | def __del__(self): |
383 | """Closes the event's file descriptor. | 404 | """Closes the event's file descriptor. |
@@ -390,7 +411,7 @@ class Event(object): | |||
390 | if self.fd: | 411 | if self.fd: |
391 | os.close(self.fd) | 412 | os.close(self.fd) |
392 | 413 | ||
393 | def perf_event_open(self, attr, pid, cpu, group_fd, flags): | 414 | def _perf_event_open(self, attr, pid, cpu, group_fd, flags): |
394 | """Wrapper for the sys_perf_evt_open() syscall. | 415 | """Wrapper for the sys_perf_evt_open() syscall. |
395 | 416 | ||
396 | Used to set up performance events, returns a file descriptor or -1 | 417 | Used to set up performance events, returns a file descriptor or -1 |
@@ -409,7 +430,7 @@ class Event(object): | |||
409 | ctypes.c_int(pid), ctypes.c_int(cpu), | 430 | ctypes.c_int(pid), ctypes.c_int(cpu), |
410 | ctypes.c_int(group_fd), ctypes.c_long(flags)) | 431 | ctypes.c_int(group_fd), ctypes.c_long(flags)) |
411 | 432 | ||
412 | def setup_event_attribute(self, trace_set, trace_point): | 433 | def _setup_event_attribute(self, trace_set, trace_point): |
413 | """Returns an initialized ctype perf_event_attr struct.""" | 434 | """Returns an initialized ctype perf_event_attr struct.""" |
414 | 435 | ||
415 | id_path = os.path.join(PATH_DEBUGFS_TRACING, 'events', trace_set, | 436 | id_path = os.path.join(PATH_DEBUGFS_TRACING, 'events', trace_set, |
@@ -419,8 +440,8 @@ class Event(object): | |||
419 | event_attr.config = int(open(id_path).read()) | 440 | event_attr.config = int(open(id_path).read()) |
420 | return event_attr | 441 | return event_attr |
421 | 442 | ||
422 | def setup_event(self, group, trace_cpu, trace_pid, trace_point, | 443 | def _setup_event(self, group, trace_cpu, trace_pid, trace_point, |
423 | trace_filter, trace_set): | 444 | trace_filter, trace_set): |
424 | """Sets up the perf event in Linux. | 445 | """Sets up the perf event in Linux. |
425 | 446 | ||
426 | Issues the syscall to register the event in the kernel and | 447 | Issues the syscall to register the event in the kernel and |
@@ -428,7 +449,7 @@ class Event(object): | |||
428 | 449 | ||
429 | """ | 450 | """ |
430 | 451 | ||
431 | event_attr = self.setup_event_attribute(trace_set, trace_point) | 452 | event_attr = self._setup_event_attribute(trace_set, trace_point) |
432 | 453 | ||
433 | # First event will be group leader. | 454 | # First event will be group leader. |
434 | group_leader = -1 | 455 | group_leader = -1 |
@@ -437,8 +458,8 @@ class Event(object): | |||
437 | if group.events: | 458 | if group.events: |
438 | group_leader = group.events[0].fd | 459 | group_leader = group.events[0].fd |
439 | 460 | ||
440 | fd = self.perf_event_open(event_attr, trace_pid, | 461 | fd = self._perf_event_open(event_attr, trace_pid, |
441 | trace_cpu, group_leader, 0) | 462 | trace_cpu, group_leader, 0) |
442 | if fd == -1: | 463 | if fd == -1: |
443 | err = ctypes.get_errno() | 464 | err = ctypes.get_errno() |
444 | raise OSError(err, os.strerror(err), | 465 | raise OSError(err, os.strerror(err), |
@@ -475,6 +496,10 @@ class Event(object): | |||
475 | 496 | ||
476 | class Provider(object): | 497 | class Provider(object): |
477 | """Encapsulates functionalities used by all providers.""" | 498 | """Encapsulates functionalities used by all providers.""" |
499 | def __init__(self, pid): | ||
500 | self.child_events = False | ||
501 | self.pid = pid | ||
502 | |||
478 | @staticmethod | 503 | @staticmethod |
479 | def is_field_wanted(fields_filter, field): | 504 | def is_field_wanted(fields_filter, field): |
480 | """Indicate whether field is valid according to fields_filter.""" | 505 | """Indicate whether field is valid according to fields_filter.""" |
@@ -500,12 +525,12 @@ class TracepointProvider(Provider): | |||
500 | """ | 525 | """ |
501 | def __init__(self, pid, fields_filter): | 526 | def __init__(self, pid, fields_filter): |
502 | self.group_leaders = [] | 527 | self.group_leaders = [] |
503 | self.filters = self.get_filters() | 528 | self.filters = self._get_filters() |
504 | self.update_fields(fields_filter) | 529 | self.update_fields(fields_filter) |
505 | self.pid = pid | 530 | super(TracepointProvider, self).__init__(pid) |
506 | 531 | ||
507 | @staticmethod | 532 | @staticmethod |
508 | def get_filters(): | 533 | def _get_filters(): |
509 | """Returns a dict of trace events, their filter ids and | 534 | """Returns a dict of trace events, their filter ids and |
510 | the values that can be filtered. | 535 | the values that can be filtered. |
511 | 536 | ||
@@ -521,8 +546,8 @@ class TracepointProvider(Provider): | |||
521 | filters['kvm_exit'] = ('exit_reason', ARCH.exit_reasons) | 546 | filters['kvm_exit'] = ('exit_reason', ARCH.exit_reasons) |
522 | return filters | 547 | return filters |
523 | 548 | ||
524 | def get_available_fields(self): | 549 | def _get_available_fields(self): |
525 | """Returns a list of available event's of format 'event name(filter | 550 | """Returns a list of available events of format 'event name(filter |
526 | name)'. | 551 | name)'. |
527 | 552 | ||
528 | All available events have directories under | 553 | All available events have directories under |
@@ -549,11 +574,12 @@ class TracepointProvider(Provider): | |||
549 | 574 | ||
550 | def update_fields(self, fields_filter): | 575 | def update_fields(self, fields_filter): |
551 | """Refresh fields, applying fields_filter""" | 576 | """Refresh fields, applying fields_filter""" |
552 | self.fields = [field for field in self.get_available_fields() | 577 | self.fields = [field for field in self._get_available_fields() |
553 | if self.is_field_wanted(fields_filter, field)] | 578 | if self.is_field_wanted(fields_filter, field) or |
579 | ARCH.tracepoint_is_child(field)] | ||
554 | 580 | ||
555 | @staticmethod | 581 | @staticmethod |
556 | def get_online_cpus(): | 582 | def _get_online_cpus(): |
557 | """Returns a list of cpu id integers.""" | 583 | """Returns a list of cpu id integers.""" |
558 | def parse_int_list(list_string): | 584 | def parse_int_list(list_string): |
559 | """Returns an int list from a string of comma separated integers and | 585 | """Returns an int list from a string of comma separated integers and |
@@ -575,17 +601,17 @@ class TracepointProvider(Provider): | |||
575 | cpu_string = cpu_list.readline() | 601 | cpu_string = cpu_list.readline() |
576 | return parse_int_list(cpu_string) | 602 | return parse_int_list(cpu_string) |
577 | 603 | ||
578 | def setup_traces(self): | 604 | def _setup_traces(self): |
579 | """Creates all event and group objects needed to be able to retrieve | 605 | """Creates all event and group objects needed to be able to retrieve |
580 | data.""" | 606 | data.""" |
581 | fields = self.get_available_fields() | 607 | fields = self._get_available_fields() |
582 | if self._pid > 0: | 608 | if self._pid > 0: |
583 | # Fetch list of all threads of the monitored pid, as qemu | 609 | # Fetch list of all threads of the monitored pid, as qemu |
584 | # starts a thread for each vcpu. | 610 | # starts a thread for each vcpu. |
585 | path = os.path.join('/proc', str(self._pid), 'task') | 611 | path = os.path.join('/proc', str(self._pid), 'task') |
586 | groupids = self.walkdir(path)[1] | 612 | groupids = self.walkdir(path)[1] |
587 | else: | 613 | else: |
588 | groupids = self.get_online_cpus() | 614 | groupids = self._get_online_cpus() |
589 | 615 | ||
590 | # The constant is needed as a buffer for python libs, std | 616 | # The constant is needed as a buffer for python libs, std |
591 | # streams and other files that the script opens. | 617 | # streams and other files that the script opens. |
@@ -663,7 +689,7 @@ class TracepointProvider(Provider): | |||
663 | # The garbage collector will get rid of all Event/Group | 689 | # The garbage collector will get rid of all Event/Group |
664 | # objects and open files after removing the references. | 690 | # objects and open files after removing the references. |
665 | self.group_leaders = [] | 691 | self.group_leaders = [] |
666 | self.setup_traces() | 692 | self._setup_traces() |
667 | self.fields = self._fields | 693 | self.fields = self._fields |
668 | 694 | ||
669 | def read(self, by_guest=0): | 695 | def read(self, by_guest=0): |
@@ -671,8 +697,12 @@ class TracepointProvider(Provider): | |||
671 | ret = defaultdict(int) | 697 | ret = defaultdict(int) |
672 | for group in self.group_leaders: | 698 | for group in self.group_leaders: |
673 | for name, val in group.read().items(): | 699 | for name, val in group.read().items(): |
674 | if name in self._fields: | 700 | if name not in self._fields: |
675 | ret[name] += val | 701 | continue |
702 | parent = ARCH.tracepoint_is_child(name) | ||
703 | if parent: | ||
704 | name += ' ' + parent | ||
705 | ret[name] += val | ||
676 | return ret | 706 | return ret |
677 | 707 | ||
678 | def reset(self): | 708 | def reset(self): |
@@ -690,11 +720,11 @@ class DebugfsProvider(Provider): | |||
690 | self._baseline = {} | 720 | self._baseline = {} |
691 | self.do_read = True | 721 | self.do_read = True |
692 | self.paths = [] | 722 | self.paths = [] |
693 | self.pid = pid | 723 | super(DebugfsProvider, self).__init__(pid) |
694 | if include_past: | 724 | if include_past: |
695 | self.restore() | 725 | self._restore() |
696 | 726 | ||
697 | def get_available_fields(self): | 727 | def _get_available_fields(self): |
698 | """"Returns a list of available fields. | 728 | """"Returns a list of available fields. |
699 | 729 | ||
700 | The fields are all available KVM debugfs files | 730 | The fields are all available KVM debugfs files |
@@ -704,8 +734,9 @@ class DebugfsProvider(Provider): | |||
704 | 734 | ||
705 | def update_fields(self, fields_filter): | 735 | def update_fields(self, fields_filter): |
706 | """Refresh fields, applying fields_filter""" | 736 | """Refresh fields, applying fields_filter""" |
707 | self._fields = [field for field in self.get_available_fields() | 737 | self._fields = [field for field in self._get_available_fields() |
708 | if self.is_field_wanted(fields_filter, field)] | 738 | if self.is_field_wanted(fields_filter, field) or |
739 | ARCH.debugfs_is_child(field)] | ||
709 | 740 | ||
710 | @property | 741 | @property |
711 | def fields(self): | 742 | def fields(self): |
@@ -758,7 +789,7 @@ class DebugfsProvider(Provider): | |||
758 | paths.append(dir) | 789 | paths.append(dir) |
759 | for path in paths: | 790 | for path in paths: |
760 | for field in self._fields: | 791 | for field in self._fields: |
761 | value = self.read_field(field, path) | 792 | value = self._read_field(field, path) |
762 | key = path + field | 793 | key = path + field |
763 | if reset == 1: | 794 | if reset == 1: |
764 | self._baseline[key] = value | 795 | self._baseline[key] = value |
@@ -766,20 +797,21 @@ class DebugfsProvider(Provider): | |||
766 | self._baseline[key] = 0 | 797 | self._baseline[key] = 0 |
767 | if self._baseline.get(key, -1) == -1: | 798 | if self._baseline.get(key, -1) == -1: |
768 | self._baseline[key] = value | 799 | self._baseline[key] = value |
769 | increment = (results.get(field, 0) + value - | 800 | parent = ARCH.debugfs_is_child(field) |
770 | self._baseline.get(key, 0)) | 801 | if parent: |
771 | if by_guest: | 802 | field = field + ' ' + parent |
772 | pid = key.split('-')[0] | 803 | else: |
773 | if pid in results: | 804 | if by_guest: |
774 | results[pid] += increment | 805 | field = key.split('-')[0] # set 'field' to 'pid' |
775 | else: | 806 | increment = value - self._baseline.get(key, 0) |
776 | results[pid] = increment | 807 | if field in results: |
808 | results[field] += increment | ||
777 | else: | 809 | else: |
778 | results[field] = increment | 810 | results[field] = increment |
779 | 811 | ||
780 | return results | 812 | return results |
781 | 813 | ||
782 | def read_field(self, field, path): | 814 | def _read_field(self, field, path): |
783 | """Returns the value of a single field from a specific VM.""" | 815 | """Returns the value of a single field from a specific VM.""" |
784 | try: | 816 | try: |
785 | return int(open(os.path.join(PATH_DEBUGFS_KVM, | 817 | return int(open(os.path.join(PATH_DEBUGFS_KVM, |
@@ -794,12 +826,15 @@ class DebugfsProvider(Provider): | |||
794 | self._baseline = {} | 826 | self._baseline = {} |
795 | self.read(1) | 827 | self.read(1) |
796 | 828 | ||
797 | def restore(self): | 829 | def _restore(self): |
798 | """Reset field counters""" | 830 | """Reset field counters""" |
799 | self._baseline = {} | 831 | self._baseline = {} |
800 | self.read(2) | 832 | self.read(2) |
801 | 833 | ||
802 | 834 | ||
835 | EventStat = namedtuple('EventStat', ['value', 'delta']) | ||
836 | |||
837 | |||
803 | class Stats(object): | 838 | class Stats(object): |
804 | """Manages the data providers and the data they provide. | 839 | """Manages the data providers and the data they provide. |
805 | 840 | ||
@@ -808,13 +843,13 @@ class Stats(object): | |||
808 | 843 | ||
809 | """ | 844 | """ |
810 | def __init__(self, options): | 845 | def __init__(self, options): |
811 | self.providers = self.get_providers(options) | 846 | self.providers = self._get_providers(options) |
812 | self._pid_filter = options.pid | 847 | self._pid_filter = options.pid |
813 | self._fields_filter = options.fields | 848 | self._fields_filter = options.fields |
814 | self.values = {} | 849 | self.values = {} |
850 | self._child_events = False | ||
815 | 851 | ||
816 | @staticmethod | 852 | def _get_providers(self, options): |
817 | def get_providers(options): | ||
818 | """Returns a list of data providers depending on the passed options.""" | 853 | """Returns a list of data providers depending on the passed options.""" |
819 | providers = [] | 854 | providers = [] |
820 | 855 | ||
@@ -826,7 +861,7 @@ class Stats(object): | |||
826 | 861 | ||
827 | return providers | 862 | return providers |
828 | 863 | ||
829 | def update_provider_filters(self): | 864 | def _update_provider_filters(self): |
830 | """Propagates fields filters to providers.""" | 865 | """Propagates fields filters to providers.""" |
831 | # As we reset the counters when updating the fields we can | 866 | # As we reset the counters when updating the fields we can |
832 | # also clear the cache of old values. | 867 | # also clear the cache of old values. |
@@ -847,7 +882,7 @@ class Stats(object): | |||
847 | def fields_filter(self, fields_filter): | 882 | def fields_filter(self, fields_filter): |
848 | if fields_filter != self._fields_filter: | 883 | if fields_filter != self._fields_filter: |
849 | self._fields_filter = fields_filter | 884 | self._fields_filter = fields_filter |
850 | self.update_provider_filters() | 885 | self._update_provider_filters() |
851 | 886 | ||
852 | @property | 887 | @property |
853 | def pid_filter(self): | 888 | def pid_filter(self): |
@@ -861,16 +896,33 @@ class Stats(object): | |||
861 | for provider in self.providers: | 896 | for provider in self.providers: |
862 | provider.pid = self._pid_filter | 897 | provider.pid = self._pid_filter |
863 | 898 | ||
899 | @property | ||
900 | def child_events(self): | ||
901 | return self._child_events | ||
902 | |||
903 | @child_events.setter | ||
904 | def child_events(self, val): | ||
905 | self._child_events = val | ||
906 | for provider in self.providers: | ||
907 | provider.child_events = val | ||
908 | |||
864 | def get(self, by_guest=0): | 909 | def get(self, by_guest=0): |
865 | """Returns a dict with field -> (value, delta to last value) of all | 910 | """Returns a dict with field -> (value, delta to last value) of all |
866 | provider data.""" | 911 | provider data. |
912 | Key formats: | ||
913 | * plain: 'key' is event name | ||
914 | * child-parent: 'key' is in format '<child> <parent>' | ||
915 | * pid: 'key' is the pid of the guest, and the record contains the | ||
916 | aggregated event data | ||
917 | These formats are generated by the providers, and handled in class TUI. | ||
918 | """ | ||
867 | for provider in self.providers: | 919 | for provider in self.providers: |
868 | new = provider.read(by_guest=by_guest) | 920 | new = provider.read(by_guest=by_guest) |
869 | for key in new if by_guest else provider.fields: | 921 | for key in new: |
870 | oldval = self.values.get(key, (0, 0))[0] | 922 | oldval = self.values.get(key, EventStat(0, 0)).value |
871 | newval = new.get(key, 0) | 923 | newval = new.get(key, 0) |
872 | newdelta = newval - oldval | 924 | newdelta = newval - oldval |
873 | self.values[key] = (newval, newdelta) | 925 | self.values[key] = EventStat(newval, newdelta) |
874 | return self.values | 926 | return self.values |
875 | 927 | ||
876 | def toggle_display_guests(self, to_pid): | 928 | def toggle_display_guests(self, to_pid): |
@@ -899,10 +951,10 @@ class Stats(object): | |||
899 | self.get(to_pid) | 951 | self.get(to_pid) |
900 | return 0 | 952 | return 0 |
901 | 953 | ||
954 | |||
902 | DELAY_DEFAULT = 3.0 | 955 | DELAY_DEFAULT = 3.0 |
903 | MAX_GUEST_NAME_LEN = 48 | 956 | MAX_GUEST_NAME_LEN = 48 |
904 | MAX_REGEX_LEN = 44 | 957 | MAX_REGEX_LEN = 44 |
905 | DEFAULT_REGEX = r'^[^\(]*$' | ||
906 | SORT_DEFAULT = 0 | 958 | SORT_DEFAULT = 0 |
907 | 959 | ||
908 | 960 | ||
@@ -969,7 +1021,7 @@ class Tui(object): | |||
969 | 1021 | ||
970 | return res | 1022 | return res |
971 | 1023 | ||
972 | def print_all_gnames(self, row): | 1024 | def _print_all_gnames(self, row): |
973 | """Print a list of all running guests along with their pids.""" | 1025 | """Print a list of all running guests along with their pids.""" |
974 | self.screen.addstr(row, 2, '%8s %-60s' % | 1026 | self.screen.addstr(row, 2, '%8s %-60s' % |
975 | ('Pid', 'Guest Name (fuzzy list, might be ' | 1027 | ('Pid', 'Guest Name (fuzzy list, might be ' |
@@ -1032,19 +1084,13 @@ class Tui(object): | |||
1032 | 1084 | ||
1033 | return name | 1085 | return name |
1034 | 1086 | ||
1035 | def update_drilldown(self): | 1087 | def _update_pid(self, pid): |
1036 | """Sets or removes a filter that only allows fields without braces.""" | ||
1037 | if not self.stats.fields_filter: | ||
1038 | self.stats.fields_filter = DEFAULT_REGEX | ||
1039 | |||
1040 | elif self.stats.fields_filter == DEFAULT_REGEX: | ||
1041 | self.stats.fields_filter = None | ||
1042 | |||
1043 | def update_pid(self, pid): | ||
1044 | """Propagates pid selection to stats object.""" | 1088 | """Propagates pid selection to stats object.""" |
1089 | self.screen.addstr(4, 1, 'Updating pid filter...') | ||
1090 | self.screen.refresh() | ||
1045 | self.stats.pid_filter = pid | 1091 | self.stats.pid_filter = pid |
1046 | 1092 | ||
1047 | def refresh_header(self, pid=None): | 1093 | def _refresh_header(self, pid=None): |
1048 | """Refreshes the header.""" | 1094 | """Refreshes the header.""" |
1049 | if pid is None: | 1095 | if pid is None: |
1050 | pid = self.stats.pid_filter | 1096 | pid = self.stats.pid_filter |
@@ -1059,8 +1105,7 @@ class Tui(object): | |||
1059 | .format(pid, gname), curses.A_BOLD) | 1105 | .format(pid, gname), curses.A_BOLD) |
1060 | else: | 1106 | else: |
1061 | self.screen.addstr(0, 0, 'kvm statistics - summary', curses.A_BOLD) | 1107 | self.screen.addstr(0, 0, 'kvm statistics - summary', curses.A_BOLD) |
1062 | if self.stats.fields_filter and self.stats.fields_filter \ | 1108 | if self.stats.fields_filter: |
1063 | != DEFAULT_REGEX: | ||
1064 | regex = self.stats.fields_filter | 1109 | regex = self.stats.fields_filter |
1065 | if len(regex) > MAX_REGEX_LEN: | 1110 | if len(regex) > MAX_REGEX_LEN: |
1066 | regex = regex[:MAX_REGEX_LEN] + '...' | 1111 | regex = regex[:MAX_REGEX_LEN] + '...' |
@@ -1075,56 +1120,99 @@ class Tui(object): | |||
1075 | self.screen.addstr(4, 1, 'Collecting data...') | 1120 | self.screen.addstr(4, 1, 'Collecting data...') |
1076 | self.screen.refresh() | 1121 | self.screen.refresh() |
1077 | 1122 | ||
1078 | def refresh_body(self, sleeptime): | 1123 | def _refresh_body(self, sleeptime): |
1124 | def is_child_field(field): | ||
1125 | return field.find('(') != -1 | ||
1126 | |||
1127 | def insert_child(sorted_items, child, values, parent): | ||
1128 | num = len(sorted_items) | ||
1129 | for i in range(0, num): | ||
1130 | # only add child if parent is present | ||
1131 | if parent.startswith(sorted_items[i][0]): | ||
1132 | sorted_items.insert(i + 1, (' ' + child, values)) | ||
1133 | |||
1134 | def get_sorted_events(self, stats): | ||
1135 | """ separate parent and child events """ | ||
1136 | if self._sorting == SORT_DEFAULT: | ||
1137 | def sortkey((_k, v)): | ||
1138 | # sort by (delta value, overall value) | ||
1139 | return (v.delta, v.value) | ||
1140 | else: | ||
1141 | def sortkey((_k, v)): | ||
1142 | # sort by overall value | ||
1143 | return v.value | ||
1144 | |||
1145 | childs = [] | ||
1146 | sorted_items = [] | ||
1147 | # we can't rule out child events to appear prior to parents even | ||
1148 | # when sorted - separate out all children first, and add in later | ||
1149 | for key, values in sorted(stats.items(), key=sortkey, | ||
1150 | reverse=True): | ||
1151 | if values == (0, 0): | ||
1152 | continue | ||
1153 | if key.find(' ') != -1: | ||
1154 | if not self.stats.child_events: | ||
1155 | continue | ||
1156 | childs.insert(0, (key, values)) | ||
1157 | else: | ||
1158 | sorted_items.append((key, values)) | ||
1159 | if self.stats.child_events: | ||
1160 | for key, values in childs: | ||
1161 | (child, parent) = key.split(' ') | ||
1162 | insert_child(sorted_items, child, values, parent) | ||
1163 | |||
1164 | return sorted_items | ||
1165 | |||
1079 | row = 3 | 1166 | row = 3 |
1080 | self.screen.move(row, 0) | 1167 | self.screen.move(row, 0) |
1081 | self.screen.clrtobot() | 1168 | self.screen.clrtobot() |
1082 | stats = self.stats.get(self._display_guests) | 1169 | stats = self.stats.get(self._display_guests) |
1083 | 1170 | total = 0. | |
1084 | def sortCurAvg(x): | 1171 | ctotal = 0. |
1085 | # sort by current events if available | 1172 | for key, values in stats.items(): |
1086 | if stats[x][1]: | 1173 | if self._display_guests: |
1087 | return (-stats[x][1], -stats[x][0]) | 1174 | if self.get_gname_from_pid(key): |
1175 | total += values.value | ||
1176 | continue | ||
1177 | if not key.find(' ') != -1: | ||
1178 | total += values.value | ||
1088 | else: | 1179 | else: |
1089 | return (0, -stats[x][0]) | 1180 | ctotal += values.value |
1181 | if total == 0.: | ||
1182 | # we don't have any fields, or all non-child events are filtered | ||
1183 | total = ctotal | ||
1090 | 1184 | ||
1091 | def sortTotal(x): | 1185 | # print events |
1092 | # sort by totals | ||
1093 | return (0, -stats[x][0]) | ||
1094 | total = 0. | ||
1095 | for key in stats.keys(): | ||
1096 | if key.find('(') is -1: | ||
1097 | total += stats[key][0] | ||
1098 | if self._sorting == SORT_DEFAULT: | ||
1099 | sortkey = sortCurAvg | ||
1100 | else: | ||
1101 | sortkey = sortTotal | ||
1102 | tavg = 0 | 1186 | tavg = 0 |
1103 | for key in sorted(stats.keys(), key=sortkey): | 1187 | tcur = 0 |
1104 | if row >= self.screen.getmaxyx()[0] - 1: | 1188 | for key, values in get_sorted_events(self, stats): |
1105 | break | 1189 | if row >= self.screen.getmaxyx()[0] - 1 or values == (0, 0): |
1106 | values = stats[key] | ||
1107 | if not values[0] and not values[1]: | ||
1108 | break | 1190 | break |
1109 | if values[0] is not None: | 1191 | if self._display_guests: |
1110 | cur = int(round(values[1] / sleeptime)) if values[1] else '' | 1192 | key = self.get_gname_from_pid(key) |
1111 | if self._display_guests: | 1193 | if not key: |
1112 | key = self.get_gname_from_pid(key) | 1194 | continue |
1113 | self.screen.addstr(row, 1, '%-40s %10d%7.1f %8s' % | 1195 | cur = int(round(values.delta / sleeptime)) if values.delta else '' |
1114 | (key, values[0], values[0] * 100 / total, | 1196 | if key[0] != ' ': |
1115 | cur)) | 1197 | if values.delta: |
1116 | if cur is not '' and key.find('(') is -1: | 1198 | tcur += values.delta |
1117 | tavg += cur | 1199 | ptotal = values.value |
1200 | ltotal = total | ||
1201 | else: | ||
1202 | ltotal = ptotal | ||
1203 | self.screen.addstr(row, 1, '%-40s %10d%7.1f %8s' % (key, | ||
1204 | values.value, | ||
1205 | values.value * 100 / float(ltotal), cur)) | ||
1118 | row += 1 | 1206 | row += 1 |
1119 | if row == 3: | 1207 | if row == 3: |
1120 | self.screen.addstr(4, 1, 'No matching events reported yet') | 1208 | self.screen.addstr(4, 1, 'No matching events reported yet') |
1121 | else: | 1209 | if row > 4: |
1210 | tavg = int(round(tcur / sleeptime)) if tcur > 0 else '' | ||
1122 | self.screen.addstr(row, 1, '%-40s %10d %8s' % | 1211 | self.screen.addstr(row, 1, '%-40s %10d %8s' % |
1123 | ('Total', total, tavg if tavg else ''), | 1212 | ('Total', total, tavg), curses.A_BOLD) |
1124 | curses.A_BOLD) | ||
1125 | self.screen.refresh() | 1213 | self.screen.refresh() |
1126 | 1214 | ||
1127 | def show_msg(self, text): | 1215 | def _show_msg(self, text): |
1128 | """Display message centered text and exit on key press""" | 1216 | """Display message centered text and exit on key press""" |
1129 | hint = 'Press any key to continue' | 1217 | hint = 'Press any key to continue' |
1130 | curses.cbreak() | 1218 | curses.cbreak() |
@@ -1139,16 +1227,16 @@ class Tui(object): | |||
1139 | curses.A_STANDOUT) | 1227 | curses.A_STANDOUT) |
1140 | self.screen.getkey() | 1228 | self.screen.getkey() |
1141 | 1229 | ||
1142 | def show_help_interactive(self): | 1230 | def _show_help_interactive(self): |
1143 | """Display help with list of interactive commands""" | 1231 | """Display help with list of interactive commands""" |
1144 | msg = (' b toggle events by guests (debugfs only, honors' | 1232 | msg = (' b toggle events by guests (debugfs only, honors' |
1145 | ' filters)', | 1233 | ' filters)', |
1146 | ' c clear filter', | 1234 | ' c clear filter', |
1147 | ' f filter by regular expression', | 1235 | ' f filter by regular expression', |
1148 | ' g filter by guest name', | 1236 | ' g filter by guest name/PID', |
1149 | ' h display interactive commands reference', | 1237 | ' h display interactive commands reference', |
1150 | ' o toggle sorting order (Total vs CurAvg/s)', | 1238 | ' o toggle sorting order (Total vs CurAvg/s)', |
1151 | ' p filter by PID', | 1239 | ' p filter by guest name/PID', |
1152 | ' q quit', | 1240 | ' q quit', |
1153 | ' r reset stats', | 1241 | ' r reset stats', |
1154 | ' s set update interval', | 1242 | ' s set update interval', |
@@ -1165,14 +1253,15 @@ class Tui(object): | |||
1165 | self.screen.addstr(row, 0, line) | 1253 | self.screen.addstr(row, 0, line) |
1166 | row += 1 | 1254 | row += 1 |
1167 | self.screen.getkey() | 1255 | self.screen.getkey() |
1168 | self.refresh_header() | 1256 | self._refresh_header() |
1169 | 1257 | ||
1170 | def show_filter_selection(self): | 1258 | def _show_filter_selection(self): |
1171 | """Draws filter selection mask. | 1259 | """Draws filter selection mask. |
1172 | 1260 | ||
1173 | Asks for a valid regex and sets the fields filter accordingly. | 1261 | Asks for a valid regex and sets the fields filter accordingly. |
1174 | 1262 | ||
1175 | """ | 1263 | """ |
1264 | msg = '' | ||
1176 | while True: | 1265 | while True: |
1177 | self.screen.erase() | 1266 | self.screen.erase() |
1178 | self.screen.addstr(0, 0, | 1267 | self.screen.addstr(0, 0, |
@@ -1181,61 +1270,25 @@ class Tui(object): | |||
1181 | self.screen.addstr(2, 0, | 1270 | self.screen.addstr(2, 0, |
1182 | "Current regex: {0}" | 1271 | "Current regex: {0}" |
1183 | .format(self.stats.fields_filter)) | 1272 | .format(self.stats.fields_filter)) |
1273 | self.screen.addstr(5, 0, msg) | ||
1184 | self.screen.addstr(3, 0, "New regex: ") | 1274 | self.screen.addstr(3, 0, "New regex: ") |
1185 | curses.echo() | 1275 | curses.echo() |
1186 | regex = self.screen.getstr().decode(ENCODING) | 1276 | regex = self.screen.getstr().decode(ENCODING) |
1187 | curses.noecho() | 1277 | curses.noecho() |
1188 | if len(regex) == 0: | 1278 | if len(regex) == 0: |
1189 | self.stats.fields_filter = DEFAULT_REGEX | 1279 | self.stats.fields_filter = '' |
1190 | self.refresh_header() | 1280 | self._refresh_header() |
1191 | return | 1281 | return |
1192 | try: | 1282 | try: |
1193 | re.compile(regex) | 1283 | re.compile(regex) |
1194 | self.stats.fields_filter = regex | 1284 | self.stats.fields_filter = regex |
1195 | self.refresh_header() | 1285 | self._refresh_header() |
1196 | return | 1286 | return |
1197 | except re.error: | 1287 | except re.error: |
1288 | msg = '"' + regex + '": Not a valid regular expression' | ||
1198 | continue | 1289 | continue |
1199 | 1290 | ||
1200 | def show_vm_selection_by_pid(self): | 1291 | def _show_set_update_interval(self): |
1201 | """Draws PID selection mask. | ||
1202 | |||
1203 | Asks for a pid until a valid pid or 0 has been entered. | ||
1204 | |||
1205 | """ | ||
1206 | msg = '' | ||
1207 | while True: | ||
1208 | self.screen.erase() | ||
1209 | self.screen.addstr(0, 0, | ||
1210 | 'Show statistics for specific pid.', | ||
1211 | curses.A_BOLD) | ||
1212 | self.screen.addstr(1, 0, | ||
1213 | 'This might limit the shown data to the trace ' | ||
1214 | 'statistics.') | ||
1215 | self.screen.addstr(5, 0, msg) | ||
1216 | self.print_all_gnames(7) | ||
1217 | |||
1218 | curses.echo() | ||
1219 | self.screen.addstr(3, 0, "Pid [0 or pid]: ") | ||
1220 | pid = self.screen.getstr().decode(ENCODING) | ||
1221 | curses.noecho() | ||
1222 | |||
1223 | try: | ||
1224 | if len(pid) > 0: | ||
1225 | pid = int(pid) | ||
1226 | if pid != 0 and not os.path.isdir(os.path.join('/proc/', | ||
1227 | str(pid))): | ||
1228 | msg = '"' + str(pid) + '": Not a running process' | ||
1229 | continue | ||
1230 | else: | ||
1231 | pid = 0 | ||
1232 | self.refresh_header(pid) | ||
1233 | self.update_pid(pid) | ||
1234 | break | ||
1235 | except ValueError: | ||
1236 | msg = '"' + str(pid) + '": Not a valid pid' | ||
1237 | |||
1238 | def show_set_update_interval(self): | ||
1239 | """Draws update interval selection mask.""" | 1292 | """Draws update interval selection mask.""" |
1240 | msg = '' | 1293 | msg = '' |
1241 | while True: | 1294 | while True: |
@@ -1265,60 +1318,67 @@ class Tui(object): | |||
1265 | 1318 | ||
1266 | except ValueError: | 1319 | except ValueError: |
1267 | msg = '"' + str(val) + '": Invalid value' | 1320 | msg = '"' + str(val) + '": Invalid value' |
1268 | self.refresh_header() | 1321 | self._refresh_header() |
1269 | 1322 | ||
1270 | def show_vm_selection_by_guest_name(self): | 1323 | def _show_vm_selection_by_guest(self): |
1271 | """Draws guest selection mask. | 1324 | """Draws guest selection mask. |
1272 | 1325 | ||
1273 | Asks for a guest name until a valid guest name or '' is entered. | 1326 | Asks for a guest name or pid until a valid guest name or '' is entered. |
1274 | 1327 | ||
1275 | """ | 1328 | """ |
1276 | msg = '' | 1329 | msg = '' |
1277 | while True: | 1330 | while True: |
1278 | self.screen.erase() | 1331 | self.screen.erase() |
1279 | self.screen.addstr(0, 0, | 1332 | self.screen.addstr(0, 0, |
1280 | 'Show statistics for specific guest.', | 1333 | 'Show statistics for specific guest or pid.', |
1281 | curses.A_BOLD) | 1334 | curses.A_BOLD) |
1282 | self.screen.addstr(1, 0, | 1335 | self.screen.addstr(1, 0, |
1283 | 'This might limit the shown data to the trace ' | 1336 | 'This might limit the shown data to the trace ' |
1284 | 'statistics.') | 1337 | 'statistics.') |
1285 | self.screen.addstr(5, 0, msg) | 1338 | self.screen.addstr(5, 0, msg) |
1286 | self.print_all_gnames(7) | 1339 | self._print_all_gnames(7) |
1287 | curses.echo() | 1340 | curses.echo() |
1288 | self.screen.addstr(3, 0, "Guest [ENTER or guest]: ") | 1341 | curses.curs_set(1) |
1289 | gname = self.screen.getstr().decode(ENCODING) | 1342 | self.screen.addstr(3, 0, "Guest or pid [ENTER exits]: ") |
1343 | guest = self.screen.getstr().decode(ENCODING) | ||
1290 | curses.noecho() | 1344 | curses.noecho() |
1291 | 1345 | ||
1292 | if not gname: | 1346 | pid = 0 |
1293 | self.refresh_header(0) | 1347 | if not guest or guest == '0': |
1294 | self.update_pid(0) | ||
1295 | break | 1348 | break |
1296 | else: | 1349 | if guest.isdigit(): |
1297 | pids = [] | 1350 | if not os.path.isdir(os.path.join('/proc/', guest)): |
1298 | try: | 1351 | msg = '"' + guest + '": Not a running process' |
1299 | pids = self.get_pid_from_gname(gname) | ||
1300 | except: | ||
1301 | msg = '"' + gname + '": Internal error while searching, ' \ | ||
1302 | 'use pid filter instead' | ||
1303 | continue | ||
1304 | if len(pids) == 0: | ||
1305 | msg = '"' + gname + '": Not an active guest' | ||
1306 | continue | 1352 | continue |
1307 | if len(pids) > 1: | 1353 | pid = int(guest) |
1308 | msg = '"' + gname + '": Multiple matches found, use pid ' \ | ||
1309 | 'filter instead' | ||
1310 | continue | ||
1311 | self.refresh_header(pids[0]) | ||
1312 | self.update_pid(pids[0]) | ||
1313 | break | 1354 | break |
1355 | pids = [] | ||
1356 | try: | ||
1357 | pids = self.get_pid_from_gname(guest) | ||
1358 | except: | ||
1359 | msg = '"' + guest + '": Internal error while searching, ' \ | ||
1360 | 'use pid filter instead' | ||
1361 | continue | ||
1362 | if len(pids) == 0: | ||
1363 | msg = '"' + guest + '": Not an active guest' | ||
1364 | continue | ||
1365 | if len(pids) > 1: | ||
1366 | msg = '"' + guest + '": Multiple matches found, use pid ' \ | ||
1367 | 'filter instead' | ||
1368 | continue | ||
1369 | pid = pids[0] | ||
1370 | break | ||
1371 | curses.curs_set(0) | ||
1372 | self._refresh_header(pid) | ||
1373 | self._update_pid(pid) | ||
1314 | 1374 | ||
1315 | def show_stats(self): | 1375 | def show_stats(self): |
1316 | """Refreshes the screen and processes user input.""" | 1376 | """Refreshes the screen and processes user input.""" |
1317 | sleeptime = self._delay_initial | 1377 | sleeptime = self._delay_initial |
1318 | self.refresh_header() | 1378 | self._refresh_header() |
1319 | start = 0.0 # result based on init value never appears on screen | 1379 | start = 0.0 # result based on init value never appears on screen |
1320 | while True: | 1380 | while True: |
1321 | self.refresh_body(time.time() - start) | 1381 | self._refresh_body(time.time() - start) |
1322 | curses.halfdelay(int(sleeptime * 10)) | 1382 | curses.halfdelay(int(sleeptime * 10)) |
1323 | start = time.time() | 1383 | start = time.time() |
1324 | sleeptime = self._delay_regular | 1384 | sleeptime = self._delay_regular |
@@ -1327,47 +1387,39 @@ class Tui(object): | |||
1327 | if char == 'b': | 1387 | if char == 'b': |
1328 | self._display_guests = not self._display_guests | 1388 | self._display_guests = not self._display_guests |
1329 | if self.stats.toggle_display_guests(self._display_guests): | 1389 | if self.stats.toggle_display_guests(self._display_guests): |
1330 | self.show_msg(['Command not available with tracepoints' | 1390 | self._show_msg(['Command not available with ' |
1331 | ' enabled', 'Restart with debugfs only ' | 1391 | 'tracepoints enabled', 'Restart with ' |
1332 | '(see option \'-d\') and try again!']) | 1392 | 'debugfs only (see option \'-d\') and ' |
1393 | 'try again!']) | ||
1333 | self._display_guests = not self._display_guests | 1394 | self._display_guests = not self._display_guests |
1334 | self.refresh_header() | 1395 | self._refresh_header() |
1335 | if char == 'c': | 1396 | if char == 'c': |
1336 | self.stats.fields_filter = DEFAULT_REGEX | 1397 | self.stats.fields_filter = '' |
1337 | self.refresh_header(0) | 1398 | self._refresh_header(0) |
1338 | self.update_pid(0) | 1399 | self._update_pid(0) |
1339 | if char == 'f': | 1400 | if char == 'f': |
1340 | curses.curs_set(1) | 1401 | curses.curs_set(1) |
1341 | self.show_filter_selection() | 1402 | self._show_filter_selection() |
1342 | curses.curs_set(0) | 1403 | curses.curs_set(0) |
1343 | sleeptime = self._delay_initial | 1404 | sleeptime = self._delay_initial |
1344 | if char == 'g': | 1405 | if char == 'g' or char == 'p': |
1345 | curses.curs_set(1) | 1406 | self._show_vm_selection_by_guest() |
1346 | self.show_vm_selection_by_guest_name() | ||
1347 | curses.curs_set(0) | ||
1348 | sleeptime = self._delay_initial | 1407 | sleeptime = self._delay_initial |
1349 | if char == 'h': | 1408 | if char == 'h': |
1350 | self.show_help_interactive() | 1409 | self._show_help_interactive() |
1351 | if char == 'o': | 1410 | if char == 'o': |
1352 | self._sorting = not self._sorting | 1411 | self._sorting = not self._sorting |
1353 | if char == 'p': | ||
1354 | curses.curs_set(1) | ||
1355 | self.show_vm_selection_by_pid() | ||
1356 | curses.curs_set(0) | ||
1357 | sleeptime = self._delay_initial | ||
1358 | if char == 'q': | 1412 | if char == 'q': |
1359 | break | 1413 | break |
1360 | if char == 'r': | 1414 | if char == 'r': |
1361 | self.stats.reset() | 1415 | self.stats.reset() |
1362 | if char == 's': | 1416 | if char == 's': |
1363 | curses.curs_set(1) | 1417 | curses.curs_set(1) |
1364 | self.show_set_update_interval() | 1418 | self._show_set_update_interval() |
1365 | curses.curs_set(0) | 1419 | curses.curs_set(0) |
1366 | sleeptime = self._delay_initial | 1420 | sleeptime = self._delay_initial |
1367 | if char == 'x': | 1421 | if char == 'x': |
1368 | self.update_drilldown() | 1422 | self.stats.child_events = not self.stats.child_events |
1369 | # prevents display of current values on next refresh | ||
1370 | self.stats.get(self._display_guests) | ||
1371 | except KeyboardInterrupt: | 1423 | except KeyboardInterrupt: |
1372 | break | 1424 | break |
1373 | except curses.error: | 1425 | except curses.error: |
@@ -1380,9 +1432,9 @@ def batch(stats): | |||
1380 | s = stats.get() | 1432 | s = stats.get() |
1381 | time.sleep(1) | 1433 | time.sleep(1) |
1382 | s = stats.get() | 1434 | s = stats.get() |
1383 | for key in sorted(s.keys()): | 1435 | for key, values in sorted(s.items()): |
1384 | values = s[key] | 1436 | print('%-42s%10d%10d' % (key.split(' ')[0], values.value, |
1385 | print('%-42s%10d%10d' % (key, values[0], values[1])) | 1437 | values.delta)) |
1386 | except KeyboardInterrupt: | 1438 | except KeyboardInterrupt: |
1387 | pass | 1439 | pass |
1388 | 1440 | ||
@@ -1392,14 +1444,14 @@ def log(stats): | |||
1392 | keys = sorted(stats.get().keys()) | 1444 | keys = sorted(stats.get().keys()) |
1393 | 1445 | ||
1394 | def banner(): | 1446 | def banner(): |
1395 | for k in keys: | 1447 | for key in keys: |
1396 | print(k, end=' ') | 1448 | print(key.split(' ')[0], end=' ') |
1397 | print() | 1449 | print() |
1398 | 1450 | ||
1399 | def statline(): | 1451 | def statline(): |
1400 | s = stats.get() | 1452 | s = stats.get() |
1401 | for k in keys: | 1453 | for key in keys: |
1402 | print(' %9d' % s[k][1], end=' ') | 1454 | print(' %9d' % s[key].delta, end=' ') |
1403 | print() | 1455 | print() |
1404 | line = 0 | 1456 | line = 0 |
1405 | banner_repeat = 20 | 1457 | banner_repeat = 20 |
@@ -1504,7 +1556,7 @@ Press any other key to refresh statistics immediately. | |||
1504 | ) | 1556 | ) |
1505 | optparser.add_option('-f', '--fields', | 1557 | optparser.add_option('-f', '--fields', |
1506 | action='store', | 1558 | action='store', |
1507 | default=DEFAULT_REGEX, | 1559 | default='', |
1508 | dest='fields', | 1560 | dest='fields', |
1509 | help='''fields to display (regex) | 1561 | help='''fields to display (regex) |
1510 | "-f help" for a list of available events''', | 1562 | "-f help" for a list of available events''', |
@@ -1539,17 +1591,6 @@ Press any other key to refresh statistics immediately. | |||
1539 | 1591 | ||
1540 | def check_access(options): | 1592 | def check_access(options): |
1541 | """Exits if the current user can't access all needed directories.""" | 1593 | """Exits if the current user can't access all needed directories.""" |
1542 | if not os.path.exists('/sys/kernel/debug'): | ||
1543 | sys.stderr.write('Please enable CONFIG_DEBUG_FS in your kernel.') | ||
1544 | sys.exit(1) | ||
1545 | |||
1546 | if not os.path.exists(PATH_DEBUGFS_KVM): | ||
1547 | sys.stderr.write("Please make sure, that debugfs is mounted and " | ||
1548 | "readable by the current user:\n" | ||
1549 | "('mount -t debugfs debugfs /sys/kernel/debug')\n" | ||
1550 | "Also ensure, that the kvm modules are loaded.\n") | ||
1551 | sys.exit(1) | ||
1552 | |||
1553 | if not os.path.exists(PATH_DEBUGFS_TRACING) and (options.tracepoints or | 1594 | if not os.path.exists(PATH_DEBUGFS_TRACING) and (options.tracepoints or |
1554 | not options.debugfs): | 1595 | not options.debugfs): |
1555 | sys.stderr.write("Please enable CONFIG_TRACING in your kernel " | 1596 | sys.stderr.write("Please enable CONFIG_TRACING in your kernel " |
@@ -1567,7 +1608,33 @@ def check_access(options): | |||
1567 | return options | 1608 | return options |
1568 | 1609 | ||
1569 | 1610 | ||
1611 | def assign_globals(): | ||
1612 | global PATH_DEBUGFS_KVM | ||
1613 | global PATH_DEBUGFS_TRACING | ||
1614 | |||
1615 | debugfs = '' | ||
1616 | for line in file('/proc/mounts'): | ||
1617 | if line.split(' ')[0] == 'debugfs': | ||
1618 | debugfs = line.split(' ')[1] | ||
1619 | break | ||
1620 | if debugfs == '': | ||
1621 | sys.stderr.write("Please make sure that CONFIG_DEBUG_FS is enabled in " | ||
1622 | "your kernel, mounted and\nreadable by the current " | ||
1623 | "user:\n" | ||
1624 | "('mount -t debugfs debugfs /sys/kernel/debug')\n") | ||
1625 | sys.exit(1) | ||
1626 | |||
1627 | PATH_DEBUGFS_KVM = os.path.join(debugfs, 'kvm') | ||
1628 | PATH_DEBUGFS_TRACING = os.path.join(debugfs, 'tracing') | ||
1629 | |||
1630 | if not os.path.exists(PATH_DEBUGFS_KVM): | ||
1631 | sys.stderr.write("Please make sure that CONFIG_KVM is enabled in " | ||
1632 | "your kernel and that the modules are loaded.\n") | ||
1633 | sys.exit(1) | ||
1634 | |||
1635 | |||
1570 | def main(): | 1636 | def main(): |
1637 | assign_globals() | ||
1571 | options = get_options() | 1638 | options = get_options() |
1572 | options = check_access(options) | 1639 | options = check_access(options) |
1573 | 1640 | ||
diff --git a/tools/kvm/kvm_stat/kvm_stat.txt b/tools/kvm/kvm_stat/kvm_stat.txt index b5b3810c9e94..0811d860fe75 100644 --- a/tools/kvm/kvm_stat/kvm_stat.txt +++ b/tools/kvm/kvm_stat/kvm_stat.txt | |||
@@ -35,13 +35,13 @@ INTERACTIVE COMMANDS | |||
35 | 35 | ||
36 | *f*:: filter by regular expression | 36 | *f*:: filter by regular expression |
37 | 37 | ||
38 | *g*:: filter by guest name | 38 | *g*:: filter by guest name/PID |
39 | 39 | ||
40 | *h*:: display interactive commands reference | 40 | *h*:: display interactive commands reference |
41 | 41 | ||
42 | *o*:: toggle sorting order (Total vs CurAvg/s) | 42 | *o*:: toggle sorting order (Total vs CurAvg/s) |
43 | 43 | ||
44 | *p*:: filter by PID | 44 | *p*:: filter by guest name/PID |
45 | 45 | ||
46 | *q*:: quit | 46 | *q*:: quit |
47 | 47 | ||
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c index 57254f5b2779..694abc628e9b 100644 --- a/tools/objtool/builtin-check.c +++ b/tools/objtool/builtin-check.c | |||
@@ -29,7 +29,7 @@ | |||
29 | #include "builtin.h" | 29 | #include "builtin.h" |
30 | #include "check.h" | 30 | #include "check.h" |
31 | 31 | ||
32 | bool no_fp, no_unreachable; | 32 | bool no_fp, no_unreachable, retpoline, module; |
33 | 33 | ||
34 | static const char * const check_usage[] = { | 34 | static const char * const check_usage[] = { |
35 | "objtool check [<options>] file.o", | 35 | "objtool check [<options>] file.o", |
@@ -39,6 +39,8 @@ static const char * const check_usage[] = { | |||
39 | const struct option check_options[] = { | 39 | const struct option check_options[] = { |
40 | OPT_BOOLEAN('f', "no-fp", &no_fp, "Skip frame pointer validation"), | 40 | OPT_BOOLEAN('f', "no-fp", &no_fp, "Skip frame pointer validation"), |
41 | OPT_BOOLEAN('u', "no-unreachable", &no_unreachable, "Skip 'unreachable instruction' warnings"), | 41 | OPT_BOOLEAN('u', "no-unreachable", &no_unreachable, "Skip 'unreachable instruction' warnings"), |
42 | OPT_BOOLEAN('r', "retpoline", &retpoline, "Validate retpoline assumptions"), | ||
43 | OPT_BOOLEAN('m', "module", &module, "Indicates the object will be part of a kernel module"), | ||
42 | OPT_END(), | 44 | OPT_END(), |
43 | }; | 45 | }; |
44 | 46 | ||
@@ -53,5 +55,5 @@ int cmd_check(int argc, const char **argv) | |||
53 | 55 | ||
54 | objname = argv[0]; | 56 | objname = argv[0]; |
55 | 57 | ||
56 | return check(objname, no_fp, no_unreachable, false); | 58 | return check(objname, false); |
57 | } | 59 | } |
diff --git a/tools/objtool/builtin-orc.c b/tools/objtool/builtin-orc.c index 91e8e19ff5e0..77ea2b97117d 100644 --- a/tools/objtool/builtin-orc.c +++ b/tools/objtool/builtin-orc.c | |||
@@ -25,7 +25,6 @@ | |||
25 | */ | 25 | */ |
26 | 26 | ||
27 | #include <string.h> | 27 | #include <string.h> |
28 | #include <subcmd/parse-options.h> | ||
29 | #include "builtin.h" | 28 | #include "builtin.h" |
30 | #include "check.h" | 29 | #include "check.h" |
31 | 30 | ||
@@ -36,9 +35,6 @@ static const char *orc_usage[] = { | |||
36 | NULL, | 35 | NULL, |
37 | }; | 36 | }; |
38 | 37 | ||
39 | extern const struct option check_options[]; | ||
40 | extern bool no_fp, no_unreachable; | ||
41 | |||
42 | int cmd_orc(int argc, const char **argv) | 38 | int cmd_orc(int argc, const char **argv) |
43 | { | 39 | { |
44 | const char *objname; | 40 | const char *objname; |
@@ -54,7 +50,7 @@ int cmd_orc(int argc, const char **argv) | |||
54 | 50 | ||
55 | objname = argv[0]; | 51 | objname = argv[0]; |
56 | 52 | ||
57 | return check(objname, no_fp, no_unreachable, true); | 53 | return check(objname, true); |
58 | } | 54 | } |
59 | 55 | ||
60 | if (!strcmp(argv[0], "dump")) { | 56 | if (!strcmp(argv[0], "dump")) { |
diff --git a/tools/objtool/builtin.h b/tools/objtool/builtin.h index dd526067fed5..28ff40e19a14 100644 --- a/tools/objtool/builtin.h +++ b/tools/objtool/builtin.h | |||
@@ -17,6 +17,11 @@ | |||
17 | #ifndef _BUILTIN_H | 17 | #ifndef _BUILTIN_H |
18 | #define _BUILTIN_H | 18 | #define _BUILTIN_H |
19 | 19 | ||
20 | #include <subcmd/parse-options.h> | ||
21 | |||
22 | extern const struct option check_options[]; | ||
23 | extern bool no_fp, no_unreachable, retpoline, module; | ||
24 | |||
20 | extern int cmd_check(int argc, const char **argv); | 25 | extern int cmd_check(int argc, const char **argv); |
21 | extern int cmd_orc(int argc, const char **argv); | 26 | extern int cmd_orc(int argc, const char **argv); |
22 | 27 | ||
diff --git a/tools/objtool/check.c b/tools/objtool/check.c index a8cb69a26576..92b6a2c21631 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <string.h> | 18 | #include <string.h> |
19 | #include <stdlib.h> | 19 | #include <stdlib.h> |
20 | 20 | ||
21 | #include "builtin.h" | ||
21 | #include "check.h" | 22 | #include "check.h" |
22 | #include "elf.h" | 23 | #include "elf.h" |
23 | #include "special.h" | 24 | #include "special.h" |
@@ -33,7 +34,6 @@ struct alternative { | |||
33 | }; | 34 | }; |
34 | 35 | ||
35 | const char *objname; | 36 | const char *objname; |
36 | static bool no_fp; | ||
37 | struct cfi_state initial_func_cfi; | 37 | struct cfi_state initial_func_cfi; |
38 | 38 | ||
39 | struct instruction *find_insn(struct objtool_file *file, | 39 | struct instruction *find_insn(struct objtool_file *file, |
@@ -497,6 +497,7 @@ static int add_jump_destinations(struct objtool_file *file) | |||
497 | * disguise, so convert them accordingly. | 497 | * disguise, so convert them accordingly. |
498 | */ | 498 | */ |
499 | insn->type = INSN_JUMP_DYNAMIC; | 499 | insn->type = INSN_JUMP_DYNAMIC; |
500 | insn->retpoline_safe = true; | ||
500 | continue; | 501 | continue; |
501 | } else { | 502 | } else { |
502 | /* sibling call */ | 503 | /* sibling call */ |
@@ -548,7 +549,8 @@ static int add_call_destinations(struct objtool_file *file) | |||
548 | if (!insn->call_dest && !insn->ignore) { | 549 | if (!insn->call_dest && !insn->ignore) { |
549 | WARN_FUNC("unsupported intra-function call", | 550 | WARN_FUNC("unsupported intra-function call", |
550 | insn->sec, insn->offset); | 551 | insn->sec, insn->offset); |
551 | WARN("If this is a retpoline, please patch it in with alternatives and annotate it with ANNOTATE_NOSPEC_ALTERNATIVE."); | 552 | if (retpoline) |
553 | WARN("If this is a retpoline, please patch it in with alternatives and annotate it with ANNOTATE_NOSPEC_ALTERNATIVE."); | ||
552 | return -1; | 554 | return -1; |
553 | } | 555 | } |
554 | 556 | ||
@@ -923,7 +925,11 @@ static struct rela *find_switch_table(struct objtool_file *file, | |||
923 | if (find_symbol_containing(file->rodata, text_rela->addend)) | 925 | if (find_symbol_containing(file->rodata, text_rela->addend)) |
924 | continue; | 926 | continue; |
925 | 927 | ||
926 | return find_rela_by_dest(file->rodata, text_rela->addend); | 928 | rodata_rela = find_rela_by_dest(file->rodata, text_rela->addend); |
929 | if (!rodata_rela) | ||
930 | continue; | ||
931 | |||
932 | return rodata_rela; | ||
927 | } | 933 | } |
928 | 934 | ||
929 | return NULL; | 935 | return NULL; |
@@ -1108,6 +1114,41 @@ static int read_unwind_hints(struct objtool_file *file) | |||
1108 | return 0; | 1114 | return 0; |
1109 | } | 1115 | } |
1110 | 1116 | ||
1117 | static int read_retpoline_hints(struct objtool_file *file) | ||
1118 | { | ||
1119 | struct section *sec; | ||
1120 | struct instruction *insn; | ||
1121 | struct rela *rela; | ||
1122 | |||
1123 | sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe"); | ||
1124 | if (!sec) | ||
1125 | return 0; | ||
1126 | |||
1127 | list_for_each_entry(rela, &sec->rela_list, list) { | ||
1128 | if (rela->sym->type != STT_SECTION) { | ||
1129 | WARN("unexpected relocation symbol type in %s", sec->name); | ||
1130 | return -1; | ||
1131 | } | ||
1132 | |||
1133 | insn = find_insn(file, rela->sym->sec, rela->addend); | ||
1134 | if (!insn) { | ||
1135 | WARN("bad .discard.retpoline_safe entry"); | ||
1136 | return -1; | ||
1137 | } | ||
1138 | |||
1139 | if (insn->type != INSN_JUMP_DYNAMIC && | ||
1140 | insn->type != INSN_CALL_DYNAMIC) { | ||
1141 | WARN_FUNC("retpoline_safe hint not an indirect jump/call", | ||
1142 | insn->sec, insn->offset); | ||
1143 | return -1; | ||
1144 | } | ||
1145 | |||
1146 | insn->retpoline_safe = true; | ||
1147 | } | ||
1148 | |||
1149 | return 0; | ||
1150 | } | ||
1151 | |||
1111 | static int decode_sections(struct objtool_file *file) | 1152 | static int decode_sections(struct objtool_file *file) |
1112 | { | 1153 | { |
1113 | int ret; | 1154 | int ret; |
@@ -1146,6 +1187,10 @@ static int decode_sections(struct objtool_file *file) | |||
1146 | if (ret) | 1187 | if (ret) |
1147 | return ret; | 1188 | return ret; |
1148 | 1189 | ||
1190 | ret = read_retpoline_hints(file); | ||
1191 | if (ret) | ||
1192 | return ret; | ||
1193 | |||
1149 | return 0; | 1194 | return 0; |
1150 | } | 1195 | } |
1151 | 1196 | ||
@@ -1891,6 +1936,38 @@ static int validate_unwind_hints(struct objtool_file *file) | |||
1891 | return warnings; | 1936 | return warnings; |
1892 | } | 1937 | } |
1893 | 1938 | ||
1939 | static int validate_retpoline(struct objtool_file *file) | ||
1940 | { | ||
1941 | struct instruction *insn; | ||
1942 | int warnings = 0; | ||
1943 | |||
1944 | for_each_insn(file, insn) { | ||
1945 | if (insn->type != INSN_JUMP_DYNAMIC && | ||
1946 | insn->type != INSN_CALL_DYNAMIC) | ||
1947 | continue; | ||
1948 | |||
1949 | if (insn->retpoline_safe) | ||
1950 | continue; | ||
1951 | |||
1952 | /* | ||
1953 | * .init.text code is ran before userspace and thus doesn't | ||
1954 | * strictly need retpolines, except for modules which are | ||
1955 | * loaded late, they very much do need retpoline in their | ||
1956 | * .init.text | ||
1957 | */ | ||
1958 | if (!strcmp(insn->sec->name, ".init.text") && !module) | ||
1959 | continue; | ||
1960 | |||
1961 | WARN_FUNC("indirect %s found in RETPOLINE build", | ||
1962 | insn->sec, insn->offset, | ||
1963 | insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call"); | ||
1964 | |||
1965 | warnings++; | ||
1966 | } | ||
1967 | |||
1968 | return warnings; | ||
1969 | } | ||
1970 | |||
1894 | static bool is_kasan_insn(struct instruction *insn) | 1971 | static bool is_kasan_insn(struct instruction *insn) |
1895 | { | 1972 | { |
1896 | return (insn->type == INSN_CALL && | 1973 | return (insn->type == INSN_CALL && |
@@ -2022,13 +2099,12 @@ static void cleanup(struct objtool_file *file) | |||
2022 | elf_close(file->elf); | 2099 | elf_close(file->elf); |
2023 | } | 2100 | } |
2024 | 2101 | ||
2025 | int check(const char *_objname, bool _no_fp, bool no_unreachable, bool orc) | 2102 | int check(const char *_objname, bool orc) |
2026 | { | 2103 | { |
2027 | struct objtool_file file; | 2104 | struct objtool_file file; |
2028 | int ret, warnings = 0; | 2105 | int ret, warnings = 0; |
2029 | 2106 | ||
2030 | objname = _objname; | 2107 | objname = _objname; |
2031 | no_fp = _no_fp; | ||
2032 | 2108 | ||
2033 | file.elf = elf_open(objname, orc ? O_RDWR : O_RDONLY); | 2109 | file.elf = elf_open(objname, orc ? O_RDWR : O_RDONLY); |
2034 | if (!file.elf) | 2110 | if (!file.elf) |
@@ -2052,6 +2128,13 @@ int check(const char *_objname, bool _no_fp, bool no_unreachable, bool orc) | |||
2052 | if (list_empty(&file.insn_list)) | 2128 | if (list_empty(&file.insn_list)) |
2053 | goto out; | 2129 | goto out; |
2054 | 2130 | ||
2131 | if (retpoline) { | ||
2132 | ret = validate_retpoline(&file); | ||
2133 | if (ret < 0) | ||
2134 | return ret; | ||
2135 | warnings += ret; | ||
2136 | } | ||
2137 | |||
2055 | ret = validate_functions(&file); | 2138 | ret = validate_functions(&file); |
2056 | if (ret < 0) | 2139 | if (ret < 0) |
2057 | goto out; | 2140 | goto out; |
diff --git a/tools/objtool/check.h b/tools/objtool/check.h index 23a1d065cae1..c6b68fcb926f 100644 --- a/tools/objtool/check.h +++ b/tools/objtool/check.h | |||
@@ -45,6 +45,7 @@ struct instruction { | |||
45 | unsigned char type; | 45 | unsigned char type; |
46 | unsigned long immediate; | 46 | unsigned long immediate; |
47 | bool alt_group, visited, dead_end, ignore, hint, save, restore, ignore_alts; | 47 | bool alt_group, visited, dead_end, ignore, hint, save, restore, ignore_alts; |
48 | bool retpoline_safe; | ||
48 | struct symbol *call_dest; | 49 | struct symbol *call_dest; |
49 | struct instruction *jump_dest; | 50 | struct instruction *jump_dest; |
50 | struct instruction *first_jump_src; | 51 | struct instruction *first_jump_src; |
@@ -63,7 +64,7 @@ struct objtool_file { | |||
63 | bool ignore_unreachables, c_file, hints; | 64 | bool ignore_unreachables, c_file, hints; |
64 | }; | 65 | }; |
65 | 66 | ||
66 | int check(const char *objname, bool no_fp, bool no_unreachable, bool orc); | 67 | int check(const char *objname, bool orc); |
67 | 68 | ||
68 | struct instruction *find_insn(struct objtool_file *file, | 69 | struct instruction *find_insn(struct objtool_file *file, |
69 | struct section *sec, unsigned long offset); | 70 | struct section *sec, unsigned long offset); |
diff --git a/tools/perf/Documentation/perf-kallsyms.txt b/tools/perf/Documentation/perf-kallsyms.txt index 954ea9e21236..cf9f4040ea5c 100644 --- a/tools/perf/Documentation/perf-kallsyms.txt +++ b/tools/perf/Documentation/perf-kallsyms.txt | |||
@@ -8,7 +8,7 @@ perf-kallsyms - Searches running kernel for symbols | |||
8 | SYNOPSIS | 8 | SYNOPSIS |
9 | -------- | 9 | -------- |
10 | [verse] | 10 | [verse] |
11 | 'perf kallsyms <options> symbol_name[,symbol_name...]' | 11 | 'perf kallsyms' [<options>] symbol_name[,symbol_name...] |
12 | 12 | ||
13 | DESCRIPTION | 13 | DESCRIPTION |
14 | ----------- | 14 | ----------- |
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index bf4ca749d1ac..a217623fec2e 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c | |||
@@ -881,6 +881,15 @@ static int __cmd_record(struct record *rec, int argc, const char **argv) | |||
881 | } | 881 | } |
882 | } | 882 | } |
883 | 883 | ||
884 | /* | ||
885 | * If we have just single event and are sending data | ||
886 | * through pipe, we need to force the ids allocation, | ||
887 | * because we synthesize event name through the pipe | ||
888 | * and need the id for that. | ||
889 | */ | ||
890 | if (data->is_pipe && rec->evlist->nr_entries == 1) | ||
891 | rec->opts.sample_id = true; | ||
892 | |||
884 | if (record__open(rec) != 0) { | 893 | if (record__open(rec) != 0) { |
885 | err = -1; | 894 | err = -1; |
886 | goto out_child; | 895 | goto out_child; |
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 98bf9d32f222..54a4c152edb3 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c | |||
@@ -917,7 +917,7 @@ static void print_metric_csv(void *ctx, | |||
917 | char buf[64], *vals, *ends; | 917 | char buf[64], *vals, *ends; |
918 | 918 | ||
919 | if (unit == NULL || fmt == NULL) { | 919 | if (unit == NULL || fmt == NULL) { |
920 | fprintf(out, "%s%s%s%s", csv_sep, csv_sep, csv_sep, csv_sep); | 920 | fprintf(out, "%s%s", csv_sep, csv_sep); |
921 | return; | 921 | return; |
922 | } | 922 | } |
923 | snprintf(buf, sizeof(buf), fmt, val); | 923 | snprintf(buf, sizeof(buf), fmt, val); |
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index b7c823ba8374..35ac016fcb98 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c | |||
@@ -991,7 +991,7 @@ static int perf_top_overwrite_fallback(struct perf_top *top, | |||
991 | evlist__for_each_entry(evlist, counter) | 991 | evlist__for_each_entry(evlist, counter) |
992 | counter->attr.write_backward = false; | 992 | counter->attr.write_backward = false; |
993 | opts->overwrite = false; | 993 | opts->overwrite = false; |
994 | ui__warning("fall back to non-overwrite mode\n"); | 994 | pr_debug2("fall back to non-overwrite mode\n"); |
995 | return 1; | 995 | return 1; |
996 | } | 996 | } |
997 | 997 | ||
diff --git a/tools/perf/perf.h b/tools/perf/perf.h index cfe46236a5e5..57b9b342d533 100644 --- a/tools/perf/perf.h +++ b/tools/perf/perf.h | |||
@@ -61,6 +61,7 @@ struct record_opts { | |||
61 | bool tail_synthesize; | 61 | bool tail_synthesize; |
62 | bool overwrite; | 62 | bool overwrite; |
63 | bool ignore_missing_thread; | 63 | bool ignore_missing_thread; |
64 | bool sample_id; | ||
64 | unsigned int freq; | 65 | unsigned int freq; |
65 | unsigned int mmap_pages; | 66 | unsigned int mmap_pages; |
66 | unsigned int auxtrace_mmap_pages; | 67 | unsigned int auxtrace_mmap_pages; |
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c index 286427975112..fbf927cf775d 100644 --- a/tools/perf/ui/browsers/annotate.c +++ b/tools/perf/ui/browsers/annotate.c | |||
@@ -327,7 +327,32 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser) | |||
327 | if (!disasm_line__is_valid_jump(cursor, sym)) | 327 | if (!disasm_line__is_valid_jump(cursor, sym)) |
328 | return; | 328 | return; |
329 | 329 | ||
330 | /* | ||
331 | * This first was seen with a gcc function, _cpp_lex_token, that | ||
332 | * has the usual jumps: | ||
333 | * | ||
334 | * │1159e6c: ↓ jne 115aa32 <_cpp_lex_token@@Base+0xf92> | ||
335 | * | ||
336 | * I.e. jumps to a label inside that function (_cpp_lex_token), and | ||
337 | * those works, but also this kind: | ||
338 | * | ||
339 | * │1159e8b: ↓ jne c469be <cpp_named_operator2name@@Base+0xa72> | ||
340 | * | ||
341 | * I.e. jumps to another function, outside _cpp_lex_token, which | ||
342 | * are not being correctly handled generating as a side effect references | ||
343 | * to ab->offset[] entries that are set to NULL, so to make this code | ||
344 | * more robust, check that here. | ||
345 | * | ||
346 | * A proper fix for will be put in place, looking at the function | ||
347 | * name right after the '<' token and probably treating this like a | ||
348 | * 'call' instruction. | ||
349 | */ | ||
330 | target = ab->offsets[cursor->ops.target.offset]; | 350 | target = ab->offsets[cursor->ops.target.offset]; |
351 | if (target == NULL) { | ||
352 | ui_helpline__printf("WARN: jump target inconsistency, press 'o', ab->offsets[%#x] = NULL\n", | ||
353 | cursor->ops.target.offset); | ||
354 | return; | ||
355 | } | ||
331 | 356 | ||
332 | bcursor = browser_line(&cursor->al); | 357 | bcursor = browser_line(&cursor->al); |
333 | btarget = browser_line(target); | 358 | btarget = browser_line(target); |
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c index 9faf3b5367db..6470ea2aa25e 100644 --- a/tools/perf/util/auxtrace.c +++ b/tools/perf/util/auxtrace.c | |||
@@ -60,6 +60,12 @@ | |||
60 | #include "sane_ctype.h" | 60 | #include "sane_ctype.h" |
61 | #include "symbol/kallsyms.h" | 61 | #include "symbol/kallsyms.h" |
62 | 62 | ||
63 | static bool auxtrace__dont_decode(struct perf_session *session) | ||
64 | { | ||
65 | return !session->itrace_synth_opts || | ||
66 | session->itrace_synth_opts->dont_decode; | ||
67 | } | ||
68 | |||
63 | int auxtrace_mmap__mmap(struct auxtrace_mmap *mm, | 69 | int auxtrace_mmap__mmap(struct auxtrace_mmap *mm, |
64 | struct auxtrace_mmap_params *mp, | 70 | struct auxtrace_mmap_params *mp, |
65 | void *userpg, int fd) | 71 | void *userpg, int fd) |
@@ -762,6 +768,9 @@ int auxtrace_queues__process_index(struct auxtrace_queues *queues, | |||
762 | size_t i; | 768 | size_t i; |
763 | int err; | 769 | int err; |
764 | 770 | ||
771 | if (auxtrace__dont_decode(session)) | ||
772 | return 0; | ||
773 | |||
765 | list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) { | 774 | list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) { |
766 | for (i = 0; i < auxtrace_index->nr; i++) { | 775 | for (i = 0; i < auxtrace_index->nr; i++) { |
767 | ent = &auxtrace_index->entries[i]; | 776 | ent = &auxtrace_index->entries[i]; |
@@ -892,12 +901,6 @@ out_free: | |||
892 | return err; | 901 | return err; |
893 | } | 902 | } |
894 | 903 | ||
895 | static bool auxtrace__dont_decode(struct perf_session *session) | ||
896 | { | ||
897 | return !session->itrace_synth_opts || | ||
898 | session->itrace_synth_opts->dont_decode; | ||
899 | } | ||
900 | |||
901 | int perf_event__process_auxtrace_info(struct perf_tool *tool __maybe_unused, | 904 | int perf_event__process_auxtrace_info(struct perf_tool *tool __maybe_unused, |
902 | union perf_event *event, | 905 | union perf_event *event, |
903 | struct perf_session *session) | 906 | struct perf_session *session) |
diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c index 1e97937b03a9..6f09e4962dad 100644 --- a/tools/perf/util/record.c +++ b/tools/perf/util/record.c | |||
@@ -137,6 +137,7 @@ void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts, | |||
137 | struct perf_evsel *evsel; | 137 | struct perf_evsel *evsel; |
138 | bool use_sample_identifier = false; | 138 | bool use_sample_identifier = false; |
139 | bool use_comm_exec; | 139 | bool use_comm_exec; |
140 | bool sample_id = opts->sample_id; | ||
140 | 141 | ||
141 | /* | 142 | /* |
142 | * Set the evsel leader links before we configure attributes, | 143 | * Set the evsel leader links before we configure attributes, |
@@ -163,8 +164,7 @@ void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts, | |||
163 | * match the id. | 164 | * match the id. |
164 | */ | 165 | */ |
165 | use_sample_identifier = perf_can_sample_identifier(); | 166 | use_sample_identifier = perf_can_sample_identifier(); |
166 | evlist__for_each_entry(evlist, evsel) | 167 | sample_id = true; |
167 | perf_evsel__set_sample_id(evsel, use_sample_identifier); | ||
168 | } else if (evlist->nr_entries > 1) { | 168 | } else if (evlist->nr_entries > 1) { |
169 | struct perf_evsel *first = perf_evlist__first(evlist); | 169 | struct perf_evsel *first = perf_evlist__first(evlist); |
170 | 170 | ||
@@ -174,6 +174,10 @@ void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts, | |||
174 | use_sample_identifier = perf_can_sample_identifier(); | 174 | use_sample_identifier = perf_can_sample_identifier(); |
175 | break; | 175 | break; |
176 | } | 176 | } |
177 | sample_id = true; | ||
178 | } | ||
179 | |||
180 | if (sample_id) { | ||
177 | evlist__for_each_entry(evlist, evsel) | 181 | evlist__for_each_entry(evlist, evsel) |
178 | perf_evsel__set_sample_id(evsel, use_sample_identifier); | 182 | perf_evsel__set_sample_id(evsel, use_sample_identifier); |
179 | } | 183 | } |
diff --git a/tools/perf/util/trigger.h b/tools/perf/util/trigger.h index 370138e7e35c..88223bc7c82b 100644 --- a/tools/perf/util/trigger.h +++ b/tools/perf/util/trigger.h | |||
@@ -12,7 +12,7 @@ | |||
12 | * States and transits: | 12 | * States and transits: |
13 | * | 13 | * |
14 | * | 14 | * |
15 | * OFF--(on)--> READY --(hit)--> HIT | 15 | * OFF--> ON --> READY --(hit)--> HIT |
16 | * ^ | | 16 | * ^ | |
17 | * | (ready) | 17 | * | (ready) |
18 | * | | | 18 | * | | |
@@ -27,8 +27,9 @@ struct trigger { | |||
27 | volatile enum { | 27 | volatile enum { |
28 | TRIGGER_ERROR = -2, | 28 | TRIGGER_ERROR = -2, |
29 | TRIGGER_OFF = -1, | 29 | TRIGGER_OFF = -1, |
30 | TRIGGER_READY = 0, | 30 | TRIGGER_ON = 0, |
31 | TRIGGER_HIT = 1, | 31 | TRIGGER_READY = 1, |
32 | TRIGGER_HIT = 2, | ||
32 | } state; | 33 | } state; |
33 | const char *name; | 34 | const char *name; |
34 | }; | 35 | }; |
@@ -50,7 +51,7 @@ static inline bool trigger_is_error(struct trigger *t) | |||
50 | static inline void trigger_on(struct trigger *t) | 51 | static inline void trigger_on(struct trigger *t) |
51 | { | 52 | { |
52 | TRIGGER_WARN_ONCE(t, TRIGGER_OFF); | 53 | TRIGGER_WARN_ONCE(t, TRIGGER_OFF); |
53 | t->state = TRIGGER_READY; | 54 | t->state = TRIGGER_ON; |
54 | } | 55 | } |
55 | 56 | ||
56 | static inline void trigger_ready(struct trigger *t) | 57 | static inline void trigger_ready(struct trigger *t) |
diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c index 44ef9eba5a7a..6c645eb77d42 100644 --- a/tools/testing/radix-tree/idr-test.c +++ b/tools/testing/radix-tree/idr-test.c | |||
@@ -178,6 +178,55 @@ void idr_get_next_test(int base) | |||
178 | idr_destroy(&idr); | 178 | idr_destroy(&idr); |
179 | } | 179 | } |
180 | 180 | ||
181 | int idr_u32_cb(int id, void *ptr, void *data) | ||
182 | { | ||
183 | BUG_ON(id < 0); | ||
184 | BUG_ON(ptr != DUMMY_PTR); | ||
185 | return 0; | ||
186 | } | ||
187 | |||
188 | void idr_u32_test1(struct idr *idr, u32 handle) | ||
189 | { | ||
190 | static bool warned = false; | ||
191 | u32 id = handle; | ||
192 | int sid = 0; | ||
193 | void *ptr; | ||
194 | |||
195 | BUG_ON(idr_alloc_u32(idr, DUMMY_PTR, &id, id, GFP_KERNEL)); | ||
196 | BUG_ON(id != handle); | ||
197 | BUG_ON(idr_alloc_u32(idr, DUMMY_PTR, &id, id, GFP_KERNEL) != -ENOSPC); | ||
198 | BUG_ON(id != handle); | ||
199 | if (!warned && id > INT_MAX) | ||
200 | printk("vvv Ignore these warnings\n"); | ||
201 | ptr = idr_get_next(idr, &sid); | ||
202 | if (id > INT_MAX) { | ||
203 | BUG_ON(ptr != NULL); | ||
204 | BUG_ON(sid != 0); | ||
205 | } else { | ||
206 | BUG_ON(ptr != DUMMY_PTR); | ||
207 | BUG_ON(sid != id); | ||
208 | } | ||
209 | idr_for_each(idr, idr_u32_cb, NULL); | ||
210 | if (!warned && id > INT_MAX) { | ||
211 | printk("^^^ Warnings over\n"); | ||
212 | warned = true; | ||
213 | } | ||
214 | BUG_ON(idr_remove(idr, id) != DUMMY_PTR); | ||
215 | BUG_ON(!idr_is_empty(idr)); | ||
216 | } | ||
217 | |||
218 | void idr_u32_test(int base) | ||
219 | { | ||
220 | DEFINE_IDR(idr); | ||
221 | idr_init_base(&idr, base); | ||
222 | idr_u32_test1(&idr, 10); | ||
223 | idr_u32_test1(&idr, 0x7fffffff); | ||
224 | idr_u32_test1(&idr, 0x80000000); | ||
225 | idr_u32_test1(&idr, 0x80000001); | ||
226 | idr_u32_test1(&idr, 0xffe00000); | ||
227 | idr_u32_test1(&idr, 0xffffffff); | ||
228 | } | ||
229 | |||
181 | void idr_checks(void) | 230 | void idr_checks(void) |
182 | { | 231 | { |
183 | unsigned long i; | 232 | unsigned long i; |
@@ -248,6 +297,9 @@ void idr_checks(void) | |||
248 | idr_get_next_test(0); | 297 | idr_get_next_test(0); |
249 | idr_get_next_test(1); | 298 | idr_get_next_test(1); |
250 | idr_get_next_test(4); | 299 | idr_get_next_test(4); |
300 | idr_u32_test(4); | ||
301 | idr_u32_test(1); | ||
302 | idr_u32_test(0); | ||
251 | } | 303 | } |
252 | 304 | ||
253 | /* | 305 | /* |
diff --git a/tools/testing/radix-tree/linux.c b/tools/testing/radix-tree/linux.c index 6903ccf35595..44a0d1ad4408 100644 --- a/tools/testing/radix-tree/linux.c +++ b/tools/testing/radix-tree/linux.c | |||
@@ -29,7 +29,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, int flags) | |||
29 | { | 29 | { |
30 | struct radix_tree_node *node; | 30 | struct radix_tree_node *node; |
31 | 31 | ||
32 | if (flags & __GFP_NOWARN) | 32 | if (!(flags & __GFP_DIRECT_RECLAIM)) |
33 | return NULL; | 33 | return NULL; |
34 | 34 | ||
35 | pthread_mutex_lock(&cachep->lock); | 35 | pthread_mutex_lock(&cachep->lock); |
@@ -73,10 +73,17 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp) | |||
73 | 73 | ||
74 | void *kmalloc(size_t size, gfp_t gfp) | 74 | void *kmalloc(size_t size, gfp_t gfp) |
75 | { | 75 | { |
76 | void *ret = malloc(size); | 76 | void *ret; |
77 | |||
78 | if (!(gfp & __GFP_DIRECT_RECLAIM)) | ||
79 | return NULL; | ||
80 | |||
81 | ret = malloc(size); | ||
77 | uatomic_inc(&nr_allocated); | 82 | uatomic_inc(&nr_allocated); |
78 | if (kmalloc_verbose) | 83 | if (kmalloc_verbose) |
79 | printf("Allocating %p from malloc\n", ret); | 84 | printf("Allocating %p from malloc\n", ret); |
85 | if (gfp & __GFP_ZERO) | ||
86 | memset(ret, 0, size); | ||
80 | return ret; | 87 | return ret; |
81 | } | 88 | } |
82 | 89 | ||
diff --git a/tools/testing/radix-tree/linux/compiler_types.h b/tools/testing/radix-tree/linux/compiler_types.h new file mode 100644 index 000000000000..e69de29bb2d1 --- /dev/null +++ b/tools/testing/radix-tree/linux/compiler_types.h | |||
diff --git a/tools/testing/radix-tree/linux/gfp.h b/tools/testing/radix-tree/linux/gfp.h index e9fff59dfd8a..e3201ccf54c3 100644 --- a/tools/testing/radix-tree/linux/gfp.h +++ b/tools/testing/radix-tree/linux/gfp.h | |||
@@ -11,6 +11,7 @@ | |||
11 | #define __GFP_IO 0x40u | 11 | #define __GFP_IO 0x40u |
12 | #define __GFP_FS 0x80u | 12 | #define __GFP_FS 0x80u |
13 | #define __GFP_NOWARN 0x200u | 13 | #define __GFP_NOWARN 0x200u |
14 | #define __GFP_ZERO 0x8000u | ||
14 | #define __GFP_ATOMIC 0x80000u | 15 | #define __GFP_ATOMIC 0x80000u |
15 | #define __GFP_ACCOUNT 0x100000u | 16 | #define __GFP_ACCOUNT 0x100000u |
16 | #define __GFP_DIRECT_RECLAIM 0x400000u | 17 | #define __GFP_DIRECT_RECLAIM 0x400000u |
diff --git a/tools/testing/radix-tree/linux/slab.h b/tools/testing/radix-tree/linux/slab.h index 979baeec7e70..a037def0dec6 100644 --- a/tools/testing/radix-tree/linux/slab.h +++ b/tools/testing/radix-tree/linux/slab.h | |||
@@ -3,6 +3,7 @@ | |||
3 | #define SLAB_H | 3 | #define SLAB_H |
4 | 4 | ||
5 | #include <linux/types.h> | 5 | #include <linux/types.h> |
6 | #include <linux/gfp.h> | ||
6 | 7 | ||
7 | #define SLAB_HWCACHE_ALIGN 1 | 8 | #define SLAB_HWCACHE_ALIGN 1 |
8 | #define SLAB_PANIC 2 | 9 | #define SLAB_PANIC 2 |
@@ -11,6 +12,11 @@ | |||
11 | void *kmalloc(size_t size, gfp_t); | 12 | void *kmalloc(size_t size, gfp_t); |
12 | void kfree(void *); | 13 | void kfree(void *); |
13 | 14 | ||
15 | static inline void *kzalloc(size_t size, gfp_t gfp) | ||
16 | { | ||
17 | return kmalloc(size, gfp | __GFP_ZERO); | ||
18 | } | ||
19 | |||
14 | void *kmem_cache_alloc(struct kmem_cache *cachep, int flags); | 20 | void *kmem_cache_alloc(struct kmem_cache *cachep, int flags); |
15 | void kmem_cache_free(struct kmem_cache *cachep, void *objp); | 21 | void kmem_cache_free(struct kmem_cache *cachep, void *objp); |
16 | 22 | ||
diff --git a/tools/testing/selftests/android/Makefile b/tools/testing/selftests/android/Makefile index 1a7492268993..f6304d2be90c 100644 --- a/tools/testing/selftests/android/Makefile +++ b/tools/testing/selftests/android/Makefile | |||
@@ -11,11 +11,11 @@ all: | |||
11 | BUILD_TARGET=$(OUTPUT)/$$DIR; \ | 11 | BUILD_TARGET=$(OUTPUT)/$$DIR; \ |
12 | mkdir $$BUILD_TARGET -p; \ | 12 | mkdir $$BUILD_TARGET -p; \ |
13 | make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\ | 13 | make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\ |
14 | #SUBDIR test prog name should be in the form: SUBDIR_test.sh | 14 | #SUBDIR test prog name should be in the form: SUBDIR_test.sh \ |
15 | TEST=$$DIR"_test.sh"; \ | 15 | TEST=$$DIR"_test.sh"; \ |
16 | if [ -e $$DIR/$$TEST ]; then | 16 | if [ -e $$DIR/$$TEST ]; then \ |
17 | rsync -a $$DIR/$$TEST $$BUILD_TARGET/; | 17 | rsync -a $$DIR/$$TEST $$BUILD_TARGET/; \ |
18 | fi | 18 | fi \ |
19 | done | 19 | done |
20 | 20 | ||
21 | override define RUN_TESTS | 21 | override define RUN_TESTS |
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index c73592fa3d41..437c0b1c9d21 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c | |||
@@ -11163,6 +11163,64 @@ static struct bpf_test tests[] = { | |||
11163 | .result = REJECT, | 11163 | .result = REJECT, |
11164 | .prog_type = BPF_PROG_TYPE_TRACEPOINT, | 11164 | .prog_type = BPF_PROG_TYPE_TRACEPOINT, |
11165 | }, | 11165 | }, |
11166 | { | ||
11167 | "xadd/w check unaligned stack", | ||
11168 | .insns = { | ||
11169 | BPF_MOV64_IMM(BPF_REG_0, 1), | ||
11170 | BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), | ||
11171 | BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7), | ||
11172 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), | ||
11173 | BPF_EXIT_INSN(), | ||
11174 | }, | ||
11175 | .result = REJECT, | ||
11176 | .errstr = "misaligned stack access off", | ||
11177 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
11178 | }, | ||
11179 | { | ||
11180 | "xadd/w check unaligned map", | ||
11181 | .insns = { | ||
11182 | BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), | ||
11183 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
11184 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), | ||
11185 | BPF_LD_MAP_FD(BPF_REG_1, 0), | ||
11186 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, | ||
11187 | BPF_FUNC_map_lookup_elem), | ||
11188 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), | ||
11189 | BPF_EXIT_INSN(), | ||
11190 | BPF_MOV64_IMM(BPF_REG_1, 1), | ||
11191 | BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3), | ||
11192 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3), | ||
11193 | BPF_EXIT_INSN(), | ||
11194 | }, | ||
11195 | .fixup_map1 = { 3 }, | ||
11196 | .result = REJECT, | ||
11197 | .errstr = "misaligned value access off", | ||
11198 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
11199 | }, | ||
11200 | { | ||
11201 | "xadd/w check unaligned pkt", | ||
11202 | .insns = { | ||
11203 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
11204 | offsetof(struct xdp_md, data)), | ||
11205 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
11206 | offsetof(struct xdp_md, data_end)), | ||
11207 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), | ||
11208 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), | ||
11209 | BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2), | ||
11210 | BPF_MOV64_IMM(BPF_REG_0, 99), | ||
11211 | BPF_JMP_IMM(BPF_JA, 0, 0, 6), | ||
11212 | BPF_MOV64_IMM(BPF_REG_0, 1), | ||
11213 | BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), | ||
11214 | BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0), | ||
11215 | BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1), | ||
11216 | BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2), | ||
11217 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1), | ||
11218 | BPF_EXIT_INSN(), | ||
11219 | }, | ||
11220 | .result = REJECT, | ||
11221 | .errstr = "BPF_XADD stores into R2 packet", | ||
11222 | .prog_type = BPF_PROG_TYPE_XDP, | ||
11223 | }, | ||
11166 | }; | 11224 | }; |
11167 | 11225 | ||
11168 | static int probe_filter_length(const struct bpf_insn *fp) | 11226 | static int probe_filter_length(const struct bpf_insn *fp) |
diff --git a/tools/testing/selftests/futex/Makefile b/tools/testing/selftests/futex/Makefile index cea4adcd42b8..a63e8453984d 100644 --- a/tools/testing/selftests/futex/Makefile +++ b/tools/testing/selftests/futex/Makefile | |||
@@ -12,9 +12,9 @@ all: | |||
12 | BUILD_TARGET=$(OUTPUT)/$$DIR; \ | 12 | BUILD_TARGET=$(OUTPUT)/$$DIR; \ |
13 | mkdir $$BUILD_TARGET -p; \ | 13 | mkdir $$BUILD_TARGET -p; \ |
14 | make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\ | 14 | make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\ |
15 | if [ -e $$DIR/$(TEST_PROGS) ]; then | 15 | if [ -e $$DIR/$(TEST_PROGS) ]; then \ |
16 | rsync -a $$DIR/$(TEST_PROGS) $$BUILD_TARGET/; | 16 | rsync -a $$DIR/$(TEST_PROGS) $$BUILD_TARGET/; \ |
17 | fi | 17 | fi \ |
18 | done | 18 | done |
19 | 19 | ||
20 | override define RUN_TESTS | 20 | override define RUN_TESTS |
diff --git a/tools/testing/selftests/memfd/config b/tools/testing/selftests/memfd/config new file mode 100644 index 000000000000..835c7f4dadcd --- /dev/null +++ b/tools/testing/selftests/memfd/config | |||
@@ -0,0 +1 @@ | |||
CONFIG_FUSE_FS=m | |||
diff --git a/tools/testing/selftests/memory-hotplug/Makefile b/tools/testing/selftests/memory-hotplug/Makefile index 86636d207adf..686da510f989 100644 --- a/tools/testing/selftests/memory-hotplug/Makefile +++ b/tools/testing/selftests/memory-hotplug/Makefile | |||
@@ -4,8 +4,9 @@ all: | |||
4 | include ../lib.mk | 4 | include ../lib.mk |
5 | 5 | ||
6 | TEST_PROGS := mem-on-off-test.sh | 6 | TEST_PROGS := mem-on-off-test.sh |
7 | override RUN_TESTS := ./mem-on-off-test.sh -r 2 && echo "selftests: memory-hotplug [PASS]" || echo "selftests: memory-hotplug [FAIL]" | 7 | override RUN_TESTS := @./mem-on-off-test.sh -r 2 && echo "selftests: memory-hotplug [PASS]" || echo "selftests: memory-hotplug [FAIL]" |
8 | override EMIT_TESTS := echo "$(RUN_TESTS)" | 8 | |
9 | override EMIT_TESTS := echo "$(subst @,,$(RUN_TESTS))" | ||
9 | 10 | ||
10 | run_full_test: | 11 | run_full_test: |
11 | @/bin/bash ./mem-on-off-test.sh && echo "memory-hotplug selftests: [PASS]" || echo "memory-hotplug selftests: [FAIL]" | 12 | @/bin/bash ./mem-on-off-test.sh && echo "memory-hotplug selftests: [PASS]" || echo "memory-hotplug selftests: [FAIL]" |
diff --git a/tools/testing/selftests/powerpc/mm/subpage_prot.c b/tools/testing/selftests/powerpc/mm/subpage_prot.c index 35ade7406dcd..3ae77ba93208 100644 --- a/tools/testing/selftests/powerpc/mm/subpage_prot.c +++ b/tools/testing/selftests/powerpc/mm/subpage_prot.c | |||
@@ -135,6 +135,16 @@ static int run_test(void *addr, unsigned long size) | |||
135 | return 0; | 135 | return 0; |
136 | } | 136 | } |
137 | 137 | ||
138 | static int syscall_available(void) | ||
139 | { | ||
140 | int rc; | ||
141 | |||
142 | errno = 0; | ||
143 | rc = syscall(__NR_subpage_prot, 0, 0, 0); | ||
144 | |||
145 | return rc == 0 || (errno != ENOENT && errno != ENOSYS); | ||
146 | } | ||
147 | |||
138 | int test_anon(void) | 148 | int test_anon(void) |
139 | { | 149 | { |
140 | unsigned long align; | 150 | unsigned long align; |
@@ -145,6 +155,8 @@ int test_anon(void) | |||
145 | void *mallocblock; | 155 | void *mallocblock; |
146 | unsigned long mallocsize; | 156 | unsigned long mallocsize; |
147 | 157 | ||
158 | SKIP_IF(!syscall_available()); | ||
159 | |||
148 | if (getpagesize() != 0x10000) { | 160 | if (getpagesize() != 0x10000) { |
149 | fprintf(stderr, "Kernel page size must be 64K!\n"); | 161 | fprintf(stderr, "Kernel page size must be 64K!\n"); |
150 | return 1; | 162 | return 1; |
@@ -180,6 +192,8 @@ int test_file(void) | |||
180 | off_t filesize; | 192 | off_t filesize; |
181 | int fd; | 193 | int fd; |
182 | 194 | ||
195 | SKIP_IF(!syscall_available()); | ||
196 | |||
183 | fd = open(file_name, O_RDWR); | 197 | fd = open(file_name, O_RDWR); |
184 | if (fd == -1) { | 198 | if (fd == -1) { |
185 | perror("failed to open file"); | 199 | perror("failed to open file"); |
diff --git a/tools/testing/selftests/powerpc/tm/Makefile b/tools/testing/selftests/powerpc/tm/Makefile index a23453943ad2..5c72ff978f27 100644 --- a/tools/testing/selftests/powerpc/tm/Makefile +++ b/tools/testing/selftests/powerpc/tm/Makefile | |||
@@ -16,7 +16,7 @@ $(OUTPUT)/tm-syscall: tm-syscall-asm.S | |||
16 | $(OUTPUT)/tm-syscall: CFLAGS += -I../../../../../usr/include | 16 | $(OUTPUT)/tm-syscall: CFLAGS += -I../../../../../usr/include |
17 | $(OUTPUT)/tm-tmspr: CFLAGS += -pthread | 17 | $(OUTPUT)/tm-tmspr: CFLAGS += -pthread |
18 | $(OUTPUT)/tm-vmx-unavail: CFLAGS += -pthread -m64 | 18 | $(OUTPUT)/tm-vmx-unavail: CFLAGS += -pthread -m64 |
19 | $(OUTPUT)/tm-resched-dscr: ../pmu/lib.o | 19 | $(OUTPUT)/tm-resched-dscr: ../pmu/lib.c |
20 | $(OUTPUT)/tm-unavailable: CFLAGS += -O0 -pthread -m64 -Wno-error=uninitialized -mvsx | 20 | $(OUTPUT)/tm-unavailable: CFLAGS += -O0 -pthread -m64 -Wno-error=uninitialized -mvsx |
21 | $(OUTPUT)/tm-trap: CFLAGS += -O0 -pthread -m64 | 21 | $(OUTPUT)/tm-trap: CFLAGS += -O0 -pthread -m64 |
22 | 22 | ||
diff --git a/tools/testing/selftests/powerpc/tm/tm-trap.c b/tools/testing/selftests/powerpc/tm/tm-trap.c index 5d92c23ee6cb..179d592f0073 100644 --- a/tools/testing/selftests/powerpc/tm/tm-trap.c +++ b/tools/testing/selftests/powerpc/tm/tm-trap.c | |||
@@ -255,6 +255,8 @@ int tm_trap_test(void) | |||
255 | 255 | ||
256 | struct sigaction trap_sa; | 256 | struct sigaction trap_sa; |
257 | 257 | ||
258 | SKIP_IF(!have_htm()); | ||
259 | |||
258 | trap_sa.sa_flags = SA_SIGINFO; | 260 | trap_sa.sa_flags = SA_SIGINFO; |
259 | trap_sa.sa_sigaction = trap_signal_handler; | 261 | trap_sa.sa_sigaction = trap_signal_handler; |
260 | sigaction(SIGTRAP, &trap_sa, NULL); | 262 | sigaction(SIGTRAP, &trap_sa, NULL); |
diff --git a/tools/testing/selftests/pstore/config b/tools/testing/selftests/pstore/config index 6a8e5a9bfc10..d148f9f89fb6 100644 --- a/tools/testing/selftests/pstore/config +++ b/tools/testing/selftests/pstore/config | |||
@@ -2,3 +2,4 @@ CONFIG_MISC_FILESYSTEMS=y | |||
2 | CONFIG_PSTORE=y | 2 | CONFIG_PSTORE=y |
3 | CONFIG_PSTORE_PMSG=y | 3 | CONFIG_PSTORE_PMSG=y |
4 | CONFIG_PSTORE_CONSOLE=y | 4 | CONFIG_PSTORE_CONSOLE=y |
5 | CONFIG_PSTORE_RAM=m | ||
diff --git a/tools/testing/selftests/sync/Makefile b/tools/testing/selftests/sync/Makefile index b3c8ba3cb668..d0121a8a3523 100644 --- a/tools/testing/selftests/sync/Makefile +++ b/tools/testing/selftests/sync/Makefile | |||
@@ -30,7 +30,7 @@ $(TEST_CUSTOM_PROGS): $(TESTS) $(OBJS) | |||
30 | $(CC) -o $(TEST_CUSTOM_PROGS) $(OBJS) $(TESTS) $(CFLAGS) $(LDFLAGS) | 30 | $(CC) -o $(TEST_CUSTOM_PROGS) $(OBJS) $(TESTS) $(CFLAGS) $(LDFLAGS) |
31 | 31 | ||
32 | $(OBJS): $(OUTPUT)/%.o: %.c | 32 | $(OBJS): $(OUTPUT)/%.o: %.c |
33 | $(CC) -c $^ -o $@ | 33 | $(CC) -c $^ -o $@ $(CFLAGS) |
34 | 34 | ||
35 | $(TESTS): $(OUTPUT)/%.o: %.c | 35 | $(TESTS): $(OUTPUT)/%.o: %.c |
36 | $(CC) -c $^ -o $@ | 36 | $(CC) -c $^ -o $@ |
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json b/tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json index e34075059c26..90bba48c3f07 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json | |||
@@ -315,7 +315,7 @@ | |||
315 | "cmdUnderTest": "$TC actions ls action skbmod", | 315 | "cmdUnderTest": "$TC actions ls action skbmod", |
316 | "expExitCode": "0", | 316 | "expExitCode": "0", |
317 | "verifyCmd": "$TC actions get action skbmod index 4", | 317 | "verifyCmd": "$TC actions get action skbmod index 4", |
318 | "matchPattern": "action order [0-9]*: skbmod pipe set etype 0x0031", | 318 | "matchPattern": "action order [0-9]*: skbmod pipe set etype 0x31", |
319 | "matchCount": "1", | 319 | "matchCount": "1", |
320 | "teardown": [ | 320 | "teardown": [ |
321 | "$TC actions flush action skbmod" | 321 | "$TC actions flush action skbmod" |
diff --git a/tools/testing/selftests/vDSO/Makefile b/tools/testing/selftests/vDSO/Makefile index 3d5a62ff7d31..f5d7a7851e21 100644 --- a/tools/testing/selftests/vDSO/Makefile +++ b/tools/testing/selftests/vDSO/Makefile | |||
@@ -1,4 +1,6 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | 1 | # SPDX-License-Identifier: GPL-2.0 |
2 | include ../lib.mk | ||
3 | |||
2 | ifndef CROSS_COMPILE | 4 | ifndef CROSS_COMPILE |
3 | CFLAGS := -std=gnu99 | 5 | CFLAGS := -std=gnu99 |
4 | CFLAGS_vdso_standalone_test_x86 := -nostdlib -fno-asynchronous-unwind-tables -fno-stack-protector | 6 | CFLAGS_vdso_standalone_test_x86 := -nostdlib -fno-asynchronous-unwind-tables -fno-stack-protector |
@@ -6,16 +8,14 @@ ifeq ($(CONFIG_X86_32),y) | |||
6 | LDLIBS += -lgcc_s | 8 | LDLIBS += -lgcc_s |
7 | endif | 9 | endif |
8 | 10 | ||
9 | TEST_PROGS := vdso_test vdso_standalone_test_x86 | 11 | TEST_PROGS := $(OUTPUT)/vdso_test $(OUTPUT)/vdso_standalone_test_x86 |
10 | 12 | ||
11 | all: $(TEST_PROGS) | 13 | all: $(TEST_PROGS) |
12 | vdso_test: parse_vdso.c vdso_test.c | 14 | $(OUTPUT)/vdso_test: parse_vdso.c vdso_test.c |
13 | vdso_standalone_test_x86: vdso_standalone_test_x86.c parse_vdso.c | 15 | $(OUTPUT)/vdso_standalone_test_x86: vdso_standalone_test_x86.c parse_vdso.c |
14 | $(CC) $(CFLAGS) $(CFLAGS_vdso_standalone_test_x86) \ | 16 | $(CC) $(CFLAGS) $(CFLAGS_vdso_standalone_test_x86) \ |
15 | vdso_standalone_test_x86.c parse_vdso.c \ | 17 | vdso_standalone_test_x86.c parse_vdso.c \ |
16 | -o vdso_standalone_test_x86 | 18 | -o $@ |
17 | 19 | ||
18 | include ../lib.mk | 20 | EXTRA_CLEAN := $(TEST_PROGS) |
19 | clean: | ||
20 | rm -fr $(TEST_PROGS) | ||
21 | endif | 21 | endif |
diff --git a/tools/testing/selftests/vm/.gitignore b/tools/testing/selftests/vm/.gitignore index 63c94d776e89..342c7bc9dc8c 100644 --- a/tools/testing/selftests/vm/.gitignore +++ b/tools/testing/selftests/vm/.gitignore | |||
@@ -11,3 +11,4 @@ mlock-intersect-test | |||
11 | mlock-random-test | 11 | mlock-random-test |
12 | virtual_address_range | 12 | virtual_address_range |
13 | gup_benchmark | 13 | gup_benchmark |
14 | va_128TBswitch | ||
diff --git a/tools/testing/selftests/vm/run_vmtests b/tools/testing/selftests/vm/run_vmtests index d2561895a021..22d564673830 100755 --- a/tools/testing/selftests/vm/run_vmtests +++ b/tools/testing/selftests/vm/run_vmtests | |||
@@ -2,25 +2,33 @@ | |||
2 | # SPDX-License-Identifier: GPL-2.0 | 2 | # SPDX-License-Identifier: GPL-2.0 |
3 | #please run as root | 3 | #please run as root |
4 | 4 | ||
5 | #we need 256M, below is the size in kB | ||
6 | needmem=262144 | ||
7 | mnt=./huge | 5 | mnt=./huge |
8 | exitcode=0 | 6 | exitcode=0 |
9 | 7 | ||
10 | #get pagesize and freepages from /proc/meminfo | 8 | #get huge pagesize and freepages from /proc/meminfo |
11 | while read name size unit; do | 9 | while read name size unit; do |
12 | if [ "$name" = "HugePages_Free:" ]; then | 10 | if [ "$name" = "HugePages_Free:" ]; then |
13 | freepgs=$size | 11 | freepgs=$size |
14 | fi | 12 | fi |
15 | if [ "$name" = "Hugepagesize:" ]; then | 13 | if [ "$name" = "Hugepagesize:" ]; then |
16 | pgsize=$size | 14 | hpgsize_KB=$size |
17 | fi | 15 | fi |
18 | done < /proc/meminfo | 16 | done < /proc/meminfo |
19 | 17 | ||
18 | # Simple hugetlbfs tests have a hardcoded minimum requirement of | ||
19 | # huge pages totaling 256MB (262144KB) in size. The userfaultfd | ||
20 | # hugetlb test requires a minimum of 2 * nr_cpus huge pages. Take | ||
21 | # both of these requirements into account and attempt to increase | ||
22 | # number of huge pages available. | ||
23 | nr_cpus=$(nproc) | ||
24 | hpgsize_MB=$((hpgsize_KB / 1024)) | ||
25 | half_ufd_size_MB=$((((nr_cpus * hpgsize_MB + 127) / 128) * 128)) | ||
26 | needmem_KB=$((half_ufd_size_MB * 2 * 1024)) | ||
27 | |||
20 | #set proper nr_hugepages | 28 | #set proper nr_hugepages |
21 | if [ -n "$freepgs" ] && [ -n "$pgsize" ]; then | 29 | if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then |
22 | nr_hugepgs=`cat /proc/sys/vm/nr_hugepages` | 30 | nr_hugepgs=`cat /proc/sys/vm/nr_hugepages` |
23 | needpgs=`expr $needmem / $pgsize` | 31 | needpgs=$((needmem_KB / hpgsize_KB)) |
24 | tries=2 | 32 | tries=2 |
25 | while [ $tries -gt 0 ] && [ $freepgs -lt $needpgs ]; do | 33 | while [ $tries -gt 0 ] && [ $freepgs -lt $needpgs ]; do |
26 | lackpgs=$(( $needpgs - $freepgs )) | 34 | lackpgs=$(( $needpgs - $freepgs )) |
@@ -107,8 +115,9 @@ fi | |||
107 | echo "---------------------------" | 115 | echo "---------------------------" |
108 | echo "running userfaultfd_hugetlb" | 116 | echo "running userfaultfd_hugetlb" |
109 | echo "---------------------------" | 117 | echo "---------------------------" |
110 | # 256MB total huge pages == 128MB src and 128MB dst | 118 | # Test requires source and destination huge pages. Size of source |
111 | ./userfaultfd hugetlb 128 32 $mnt/ufd_test_file | 119 | # (half_ufd_size_MB) is passed as argument to test. |
120 | ./userfaultfd hugetlb $half_ufd_size_MB 32 $mnt/ufd_test_file | ||
112 | if [ $? -ne 0 ]; then | 121 | if [ $? -ne 0 ]; then |
113 | echo "[FAIL]" | 122 | echo "[FAIL]" |
114 | exitcode=1 | 123 | exitcode=1 |
diff --git a/tools/testing/selftests/x86/entry_from_vm86.c b/tools/testing/selftests/x86/entry_from_vm86.c index 361466a2eaef..ade443a88421 100644 --- a/tools/testing/selftests/x86/entry_from_vm86.c +++ b/tools/testing/selftests/x86/entry_from_vm86.c | |||
@@ -95,6 +95,10 @@ asm ( | |||
95 | "int3\n\t" | 95 | "int3\n\t" |
96 | "vmcode_int80:\n\t" | 96 | "vmcode_int80:\n\t" |
97 | "int $0x80\n\t" | 97 | "int $0x80\n\t" |
98 | "vmcode_popf_hlt:\n\t" | ||
99 | "push %ax\n\t" | ||
100 | "popf\n\t" | ||
101 | "hlt\n\t" | ||
98 | "vmcode_umip:\n\t" | 102 | "vmcode_umip:\n\t" |
99 | /* addressing via displacements */ | 103 | /* addressing via displacements */ |
100 | "smsw (2052)\n\t" | 104 | "smsw (2052)\n\t" |
@@ -124,8 +128,8 @@ asm ( | |||
124 | 128 | ||
125 | extern unsigned char vmcode[], end_vmcode[]; | 129 | extern unsigned char vmcode[], end_vmcode[]; |
126 | extern unsigned char vmcode_bound[], vmcode_sysenter[], vmcode_syscall[], | 130 | extern unsigned char vmcode_bound[], vmcode_sysenter[], vmcode_syscall[], |
127 | vmcode_sti[], vmcode_int3[], vmcode_int80[], vmcode_umip[], | 131 | vmcode_sti[], vmcode_int3[], vmcode_int80[], vmcode_popf_hlt[], |
128 | vmcode_umip_str[], vmcode_umip_sldt[]; | 132 | vmcode_umip[], vmcode_umip_str[], vmcode_umip_sldt[]; |
129 | 133 | ||
130 | /* Returns false if the test was skipped. */ | 134 | /* Returns false if the test was skipped. */ |
131 | static bool do_test(struct vm86plus_struct *v86, unsigned long eip, | 135 | static bool do_test(struct vm86plus_struct *v86, unsigned long eip, |
@@ -175,7 +179,7 @@ static bool do_test(struct vm86plus_struct *v86, unsigned long eip, | |||
175 | (VM86_TYPE(ret) == rettype && VM86_ARG(ret) == retarg)) { | 179 | (VM86_TYPE(ret) == rettype && VM86_ARG(ret) == retarg)) { |
176 | printf("[OK]\tReturned correctly\n"); | 180 | printf("[OK]\tReturned correctly\n"); |
177 | } else { | 181 | } else { |
178 | printf("[FAIL]\tIncorrect return reason\n"); | 182 | printf("[FAIL]\tIncorrect return reason (started at eip = 0x%lx, ended at eip = 0x%lx)\n", eip, v86->regs.eip); |
179 | nerrs++; | 183 | nerrs++; |
180 | } | 184 | } |
181 | 185 | ||
@@ -264,6 +268,9 @@ int main(void) | |||
264 | v86.regs.ds = load_addr / 16; | 268 | v86.regs.ds = load_addr / 16; |
265 | v86.regs.es = load_addr / 16; | 269 | v86.regs.es = load_addr / 16; |
266 | 270 | ||
271 | /* Use the end of the page as our stack. */ | ||
272 | v86.regs.esp = 4096; | ||
273 | |||
267 | assert((v86.regs.cs & 3) == 0); /* Looks like RPL = 0 */ | 274 | assert((v86.regs.cs & 3) == 0); /* Looks like RPL = 0 */ |
268 | 275 | ||
269 | /* #BR -- should deliver SIG??? */ | 276 | /* #BR -- should deliver SIG??? */ |
@@ -295,6 +302,23 @@ int main(void) | |||
295 | v86.regs.eflags &= ~X86_EFLAGS_IF; | 302 | v86.regs.eflags &= ~X86_EFLAGS_IF; |
296 | do_test(&v86, vmcode_sti - vmcode, VM86_STI, 0, "STI with VIP set"); | 303 | do_test(&v86, vmcode_sti - vmcode, VM86_STI, 0, "STI with VIP set"); |
297 | 304 | ||
305 | /* POPF with VIP set but IF clear: should not trap */ | ||
306 | v86.regs.eflags = X86_EFLAGS_VIP; | ||
307 | v86.regs.eax = 0; | ||
308 | do_test(&v86, vmcode_popf_hlt - vmcode, VM86_UNKNOWN, 0, "POPF with VIP set and IF clear"); | ||
309 | |||
310 | /* POPF with VIP set and IF set: should trap */ | ||
311 | v86.regs.eflags = X86_EFLAGS_VIP; | ||
312 | v86.regs.eax = X86_EFLAGS_IF; | ||
313 | do_test(&v86, vmcode_popf_hlt - vmcode, VM86_STI, 0, "POPF with VIP and IF set"); | ||
314 | |||
315 | /* POPF with VIP clear and IF set: should not trap */ | ||
316 | v86.regs.eflags = 0; | ||
317 | v86.regs.eax = X86_EFLAGS_IF; | ||
318 | do_test(&v86, vmcode_popf_hlt - vmcode, VM86_UNKNOWN, 0, "POPF with VIP clear and IF set"); | ||
319 | |||
320 | v86.regs.eflags = 0; | ||
321 | |||
298 | /* INT3 -- should cause #BP */ | 322 | /* INT3 -- should cause #BP */ |
299 | do_test(&v86, vmcode_int3 - vmcode, VM86_TRAP, 3, "INT3"); | 323 | do_test(&v86, vmcode_int3 - vmcode, VM86_TRAP, 3, "INT3"); |
300 | 324 | ||
@@ -318,7 +342,7 @@ int main(void) | |||
318 | clearhandler(SIGSEGV); | 342 | clearhandler(SIGSEGV); |
319 | 343 | ||
320 | /* Make sure nothing explodes if we fork. */ | 344 | /* Make sure nothing explodes if we fork. */ |
321 | if (fork() > 0) | 345 | if (fork() == 0) |
322 | return 0; | 346 | return 0; |
323 | 347 | ||
324 | return (nerrs == 0 ? 0 : 1); | 348 | return (nerrs == 0 ? 0 : 1); |
diff --git a/tools/testing/selftests/x86/test_vsyscall.c b/tools/testing/selftests/x86/test_vsyscall.c index be81621446f0..0b4f1cc2291c 100644 --- a/tools/testing/selftests/x86/test_vsyscall.c +++ b/tools/testing/selftests/x86/test_vsyscall.c | |||
@@ -450,7 +450,7 @@ static void sigtrap(int sig, siginfo_t *info, void *ctx_void) | |||
450 | num_vsyscall_traps++; | 450 | num_vsyscall_traps++; |
451 | } | 451 | } |
452 | 452 | ||
453 | static int test_native_vsyscall(void) | 453 | static int test_emulation(void) |
454 | { | 454 | { |
455 | time_t tmp; | 455 | time_t tmp; |
456 | bool is_native; | 456 | bool is_native; |
@@ -458,7 +458,7 @@ static int test_native_vsyscall(void) | |||
458 | if (!vtime) | 458 | if (!vtime) |
459 | return 0; | 459 | return 0; |
460 | 460 | ||
461 | printf("[RUN]\tchecking for native vsyscall\n"); | 461 | printf("[RUN]\tchecking that vsyscalls are emulated\n"); |
462 | sethandler(SIGTRAP, sigtrap, 0); | 462 | sethandler(SIGTRAP, sigtrap, 0); |
463 | set_eflags(get_eflags() | X86_EFLAGS_TF); | 463 | set_eflags(get_eflags() | X86_EFLAGS_TF); |
464 | vtime(&tmp); | 464 | vtime(&tmp); |
@@ -474,11 +474,12 @@ static int test_native_vsyscall(void) | |||
474 | */ | 474 | */ |
475 | is_native = (num_vsyscall_traps > 1); | 475 | is_native = (num_vsyscall_traps > 1); |
476 | 476 | ||
477 | printf("\tvsyscalls are %s (%d instructions in vsyscall page)\n", | 477 | printf("[%s]\tvsyscalls are %s (%d instructions in vsyscall page)\n", |
478 | (is_native ? "FAIL" : "OK"), | ||
478 | (is_native ? "native" : "emulated"), | 479 | (is_native ? "native" : "emulated"), |
479 | (int)num_vsyscall_traps); | 480 | (int)num_vsyscall_traps); |
480 | 481 | ||
481 | return 0; | 482 | return is_native; |
482 | } | 483 | } |
483 | #endif | 484 | #endif |
484 | 485 | ||
@@ -498,7 +499,7 @@ int main(int argc, char **argv) | |||
498 | nerrs += test_vsys_r(); | 499 | nerrs += test_vsys_r(); |
499 | 500 | ||
500 | #ifdef __x86_64__ | 501 | #ifdef __x86_64__ |
501 | nerrs += test_native_vsyscall(); | 502 | nerrs += test_emulation(); |
502 | #endif | 503 | #endif |
503 | 504 | ||
504 | return nerrs ? 1 : 0; | 505 | return nerrs ? 1 : 0; |
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index 70268c0bec79..282389eb204f 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c | |||
@@ -36,6 +36,8 @@ static struct timecounter *timecounter; | |||
36 | static unsigned int host_vtimer_irq; | 36 | static unsigned int host_vtimer_irq; |
37 | static u32 host_vtimer_irq_flags; | 37 | static u32 host_vtimer_irq_flags; |
38 | 38 | ||
39 | static DEFINE_STATIC_KEY_FALSE(has_gic_active_state); | ||
40 | |||
39 | static const struct kvm_irq_level default_ptimer_irq = { | 41 | static const struct kvm_irq_level default_ptimer_irq = { |
40 | .irq = 30, | 42 | .irq = 30, |
41 | .level = 1, | 43 | .level = 1, |
@@ -56,6 +58,12 @@ u64 kvm_phys_timer_read(void) | |||
56 | return timecounter->cc->read(timecounter->cc); | 58 | return timecounter->cc->read(timecounter->cc); |
57 | } | 59 | } |
58 | 60 | ||
61 | static inline bool userspace_irqchip(struct kvm *kvm) | ||
62 | { | ||
63 | return static_branch_unlikely(&userspace_irqchip_in_use) && | ||
64 | unlikely(!irqchip_in_kernel(kvm)); | ||
65 | } | ||
66 | |||
59 | static void soft_timer_start(struct hrtimer *hrt, u64 ns) | 67 | static void soft_timer_start(struct hrtimer *hrt, u64 ns) |
60 | { | 68 | { |
61 | hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns), | 69 | hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns), |
@@ -69,25 +77,6 @@ static void soft_timer_cancel(struct hrtimer *hrt, struct work_struct *work) | |||
69 | cancel_work_sync(work); | 77 | cancel_work_sync(work); |
70 | } | 78 | } |
71 | 79 | ||
72 | static void kvm_vtimer_update_mask_user(struct kvm_vcpu *vcpu) | ||
73 | { | ||
74 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); | ||
75 | |||
76 | /* | ||
77 | * When using a userspace irqchip with the architected timers, we must | ||
78 | * prevent continuously exiting from the guest, and therefore mask the | ||
79 | * physical interrupt by disabling it on the host interrupt controller | ||
80 | * when the virtual level is high, such that the guest can make | ||
81 | * forward progress. Once we detect the output level being | ||
82 | * de-asserted, we unmask the interrupt again so that we exit from the | ||
83 | * guest when the timer fires. | ||
84 | */ | ||
85 | if (vtimer->irq.level) | ||
86 | disable_percpu_irq(host_vtimer_irq); | ||
87 | else | ||
88 | enable_percpu_irq(host_vtimer_irq, 0); | ||
89 | } | ||
90 | |||
91 | static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id) | 80 | static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id) |
92 | { | 81 | { |
93 | struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id; | 82 | struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id; |
@@ -106,9 +95,9 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id) | |||
106 | if (kvm_timer_should_fire(vtimer)) | 95 | if (kvm_timer_should_fire(vtimer)) |
107 | kvm_timer_update_irq(vcpu, true, vtimer); | 96 | kvm_timer_update_irq(vcpu, true, vtimer); |
108 | 97 | ||
109 | if (static_branch_unlikely(&userspace_irqchip_in_use) && | 98 | if (userspace_irqchip(vcpu->kvm) && |
110 | unlikely(!irqchip_in_kernel(vcpu->kvm))) | 99 | !static_branch_unlikely(&has_gic_active_state)) |
111 | kvm_vtimer_update_mask_user(vcpu); | 100 | disable_percpu_irq(host_vtimer_irq); |
112 | 101 | ||
113 | return IRQ_HANDLED; | 102 | return IRQ_HANDLED; |
114 | } | 103 | } |
@@ -290,8 +279,7 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level, | |||
290 | trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq, | 279 | trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq, |
291 | timer_ctx->irq.level); | 280 | timer_ctx->irq.level); |
292 | 281 | ||
293 | if (!static_branch_unlikely(&userspace_irqchip_in_use) || | 282 | if (!userspace_irqchip(vcpu->kvm)) { |
294 | likely(irqchip_in_kernel(vcpu->kvm))) { | ||
295 | ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, | 283 | ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, |
296 | timer_ctx->irq.irq, | 284 | timer_ctx->irq.irq, |
297 | timer_ctx->irq.level, | 285 | timer_ctx->irq.level, |
@@ -350,12 +338,6 @@ static void kvm_timer_update_state(struct kvm_vcpu *vcpu) | |||
350 | phys_timer_emulate(vcpu); | 338 | phys_timer_emulate(vcpu); |
351 | } | 339 | } |
352 | 340 | ||
353 | static void __timer_snapshot_state(struct arch_timer_context *timer) | ||
354 | { | ||
355 | timer->cnt_ctl = read_sysreg_el0(cntv_ctl); | ||
356 | timer->cnt_cval = read_sysreg_el0(cntv_cval); | ||
357 | } | ||
358 | |||
359 | static void vtimer_save_state(struct kvm_vcpu *vcpu) | 341 | static void vtimer_save_state(struct kvm_vcpu *vcpu) |
360 | { | 342 | { |
361 | struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; | 343 | struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; |
@@ -367,8 +349,10 @@ static void vtimer_save_state(struct kvm_vcpu *vcpu) | |||
367 | if (!vtimer->loaded) | 349 | if (!vtimer->loaded) |
368 | goto out; | 350 | goto out; |
369 | 351 | ||
370 | if (timer->enabled) | 352 | if (timer->enabled) { |
371 | __timer_snapshot_state(vtimer); | 353 | vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl); |
354 | vtimer->cnt_cval = read_sysreg_el0(cntv_cval); | ||
355 | } | ||
372 | 356 | ||
373 | /* Disable the virtual timer */ | 357 | /* Disable the virtual timer */ |
374 | write_sysreg_el0(0, cntv_ctl); | 358 | write_sysreg_el0(0, cntv_ctl); |
@@ -460,23 +444,43 @@ static void set_cntvoff(u64 cntvoff) | |||
460 | kvm_call_hyp(__kvm_timer_set_cntvoff, low, high); | 444 | kvm_call_hyp(__kvm_timer_set_cntvoff, low, high); |
461 | } | 445 | } |
462 | 446 | ||
463 | static void kvm_timer_vcpu_load_vgic(struct kvm_vcpu *vcpu) | 447 | static inline void set_vtimer_irq_phys_active(struct kvm_vcpu *vcpu, bool active) |
448 | { | ||
449 | int r; | ||
450 | r = irq_set_irqchip_state(host_vtimer_irq, IRQCHIP_STATE_ACTIVE, active); | ||
451 | WARN_ON(r); | ||
452 | } | ||
453 | |||
454 | static void kvm_timer_vcpu_load_gic(struct kvm_vcpu *vcpu) | ||
464 | { | 455 | { |
465 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); | 456 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); |
466 | bool phys_active; | 457 | bool phys_active; |
467 | int ret; | ||
468 | |||
469 | phys_active = kvm_vgic_map_is_active(vcpu, vtimer->irq.irq); | ||
470 | 458 | ||
471 | ret = irq_set_irqchip_state(host_vtimer_irq, | 459 | if (irqchip_in_kernel(vcpu->kvm)) |
472 | IRQCHIP_STATE_ACTIVE, | 460 | phys_active = kvm_vgic_map_is_active(vcpu, vtimer->irq.irq); |
473 | phys_active); | 461 | else |
474 | WARN_ON(ret); | 462 | phys_active = vtimer->irq.level; |
463 | set_vtimer_irq_phys_active(vcpu, phys_active); | ||
475 | } | 464 | } |
476 | 465 | ||
477 | static void kvm_timer_vcpu_load_user(struct kvm_vcpu *vcpu) | 466 | static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu) |
478 | { | 467 | { |
479 | kvm_vtimer_update_mask_user(vcpu); | 468 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); |
469 | |||
470 | /* | ||
471 | * When using a userspace irqchip with the architected timers and a | ||
472 | * host interrupt controller that doesn't support an active state, we | ||
473 | * must still prevent continuously exiting from the guest, and | ||
474 | * therefore mask the physical interrupt by disabling it on the host | ||
475 | * interrupt controller when the virtual level is high, such that the | ||
476 | * guest can make forward progress. Once we detect the output level | ||
477 | * being de-asserted, we unmask the interrupt again so that we exit | ||
478 | * from the guest when the timer fires. | ||
479 | */ | ||
480 | if (vtimer->irq.level) | ||
481 | disable_percpu_irq(host_vtimer_irq); | ||
482 | else | ||
483 | enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags); | ||
480 | } | 484 | } |
481 | 485 | ||
482 | void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu) | 486 | void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu) |
@@ -487,10 +491,10 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu) | |||
487 | if (unlikely(!timer->enabled)) | 491 | if (unlikely(!timer->enabled)) |
488 | return; | 492 | return; |
489 | 493 | ||
490 | if (unlikely(!irqchip_in_kernel(vcpu->kvm))) | 494 | if (static_branch_likely(&has_gic_active_state)) |
491 | kvm_timer_vcpu_load_user(vcpu); | 495 | kvm_timer_vcpu_load_gic(vcpu); |
492 | else | 496 | else |
493 | kvm_timer_vcpu_load_vgic(vcpu); | 497 | kvm_timer_vcpu_load_nogic(vcpu); |
494 | 498 | ||
495 | set_cntvoff(vtimer->cntvoff); | 499 | set_cntvoff(vtimer->cntvoff); |
496 | 500 | ||
@@ -555,22 +559,29 @@ static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu) | |||
555 | { | 559 | { |
556 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); | 560 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); |
557 | 561 | ||
558 | if (unlikely(!irqchip_in_kernel(vcpu->kvm))) { | 562 | if (!kvm_timer_should_fire(vtimer)) { |
559 | __timer_snapshot_state(vtimer); | 563 | kvm_timer_update_irq(vcpu, false, vtimer); |
560 | if (!kvm_timer_should_fire(vtimer)) { | 564 | if (static_branch_likely(&has_gic_active_state)) |
561 | kvm_timer_update_irq(vcpu, false, vtimer); | 565 | set_vtimer_irq_phys_active(vcpu, false); |
562 | kvm_vtimer_update_mask_user(vcpu); | 566 | else |
563 | } | 567 | enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags); |
564 | } | 568 | } |
565 | } | 569 | } |
566 | 570 | ||
567 | void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) | 571 | void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) |
568 | { | 572 | { |
569 | unmask_vtimer_irq_user(vcpu); | 573 | struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; |
574 | |||
575 | if (unlikely(!timer->enabled)) | ||
576 | return; | ||
577 | |||
578 | if (unlikely(!irqchip_in_kernel(vcpu->kvm))) | ||
579 | unmask_vtimer_irq_user(vcpu); | ||
570 | } | 580 | } |
571 | 581 | ||
572 | int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu) | 582 | int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu) |
573 | { | 583 | { |
584 | struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; | ||
574 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); | 585 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); |
575 | struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); | 586 | struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); |
576 | 587 | ||
@@ -584,6 +595,9 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu) | |||
584 | ptimer->cnt_ctl = 0; | 595 | ptimer->cnt_ctl = 0; |
585 | kvm_timer_update_state(vcpu); | 596 | kvm_timer_update_state(vcpu); |
586 | 597 | ||
598 | if (timer->enabled && irqchip_in_kernel(vcpu->kvm)) | ||
599 | kvm_vgic_reset_mapped_irq(vcpu, vtimer->irq.irq); | ||
600 | |||
587 | return 0; | 601 | return 0; |
588 | } | 602 | } |
589 | 603 | ||
@@ -753,9 +767,11 @@ int kvm_timer_hyp_init(bool has_gic) | |||
753 | kvm_err("kvm_arch_timer: error setting vcpu affinity\n"); | 767 | kvm_err("kvm_arch_timer: error setting vcpu affinity\n"); |
754 | goto out_free_irq; | 768 | goto out_free_irq; |
755 | } | 769 | } |
770 | |||
771 | static_branch_enable(&has_gic_active_state); | ||
756 | } | 772 | } |
757 | 773 | ||
758 | kvm_info("virtual timer IRQ%d\n", host_vtimer_irq); | 774 | kvm_debug("virtual timer IRQ%d\n", host_vtimer_irq); |
759 | 775 | ||
760 | cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING, | 776 | cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING, |
761 | "kvm/arm/timer:starting", kvm_timer_starting_cpu, | 777 | "kvm/arm/timer:starting", kvm_timer_starting_cpu, |
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 86941f6181bb..53572304843b 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c | |||
@@ -384,14 +384,11 @@ static void vcpu_power_off(struct kvm_vcpu *vcpu) | |||
384 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, | 384 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, |
385 | struct kvm_mp_state *mp_state) | 385 | struct kvm_mp_state *mp_state) |
386 | { | 386 | { |
387 | vcpu_load(vcpu); | ||
388 | |||
389 | if (vcpu->arch.power_off) | 387 | if (vcpu->arch.power_off) |
390 | mp_state->mp_state = KVM_MP_STATE_STOPPED; | 388 | mp_state->mp_state = KVM_MP_STATE_STOPPED; |
391 | else | 389 | else |
392 | mp_state->mp_state = KVM_MP_STATE_RUNNABLE; | 390 | mp_state->mp_state = KVM_MP_STATE_RUNNABLE; |
393 | 391 | ||
394 | vcpu_put(vcpu); | ||
395 | return 0; | 392 | return 0; |
396 | } | 393 | } |
397 | 394 | ||
@@ -400,8 +397,6 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, | |||
400 | { | 397 | { |
401 | int ret = 0; | 398 | int ret = 0; |
402 | 399 | ||
403 | vcpu_load(vcpu); | ||
404 | |||
405 | switch (mp_state->mp_state) { | 400 | switch (mp_state->mp_state) { |
406 | case KVM_MP_STATE_RUNNABLE: | 401 | case KVM_MP_STATE_RUNNABLE: |
407 | vcpu->arch.power_off = false; | 402 | vcpu->arch.power_off = false; |
@@ -413,7 +408,6 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, | |||
413 | ret = -EINVAL; | 408 | ret = -EINVAL; |
414 | } | 409 | } |
415 | 410 | ||
416 | vcpu_put(vcpu); | ||
417 | return ret; | 411 | return ret; |
418 | } | 412 | } |
419 | 413 | ||
@@ -1036,8 +1030,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp, | |||
1036 | struct kvm_device_attr attr; | 1030 | struct kvm_device_attr attr; |
1037 | long r; | 1031 | long r; |
1038 | 1032 | ||
1039 | vcpu_load(vcpu); | ||
1040 | |||
1041 | switch (ioctl) { | 1033 | switch (ioctl) { |
1042 | case KVM_ARM_VCPU_INIT: { | 1034 | case KVM_ARM_VCPU_INIT: { |
1043 | struct kvm_vcpu_init init; | 1035 | struct kvm_vcpu_init init; |
@@ -1114,7 +1106,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp, | |||
1114 | r = -EINVAL; | 1106 | r = -EINVAL; |
1115 | } | 1107 | } |
1116 | 1108 | ||
1117 | vcpu_put(vcpu); | ||
1118 | return r; | 1109 | return r; |
1119 | } | 1110 | } |
1120 | 1111 | ||
diff --git a/virt/kvm/arm/hyp/vgic-v3-sr.c b/virt/kvm/arm/hyp/vgic-v3-sr.c index f5c3d6d7019e..b89ce5432214 100644 --- a/virt/kvm/arm/hyp/vgic-v3-sr.c +++ b/virt/kvm/arm/hyp/vgic-v3-sr.c | |||
@@ -215,7 +215,8 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) | |||
215 | * are now visible to the system register interface. | 215 | * are now visible to the system register interface. |
216 | */ | 216 | */ |
217 | if (!cpu_if->vgic_sre) { | 217 | if (!cpu_if->vgic_sre) { |
218 | dsb(st); | 218 | dsb(sy); |
219 | isb(); | ||
219 | cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2); | 220 | cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2); |
220 | } | 221 | } |
221 | 222 | ||
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c index ec62d1cccab7..b960acdd0c05 100644 --- a/virt/kvm/arm/mmu.c +++ b/virt/kvm/arm/mmu.c | |||
@@ -1810,9 +1810,9 @@ int kvm_mmu_init(void) | |||
1810 | */ | 1810 | */ |
1811 | BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK); | 1811 | BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK); |
1812 | 1812 | ||
1813 | kvm_info("IDMAP page: %lx\n", hyp_idmap_start); | 1813 | kvm_debug("IDMAP page: %lx\n", hyp_idmap_start); |
1814 | kvm_info("HYP VA range: %lx:%lx\n", | 1814 | kvm_debug("HYP VA range: %lx:%lx\n", |
1815 | kern_hyp_va(PAGE_OFFSET), kern_hyp_va(~0UL)); | 1815 | kern_hyp_va(PAGE_OFFSET), kern_hyp_va(~0UL)); |
1816 | 1816 | ||
1817 | if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) && | 1817 | if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) && |
1818 | hyp_idmap_start < kern_hyp_va(~0UL) && | 1818 | hyp_idmap_start < kern_hyp_va(~0UL) && |
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c index 83d82bd7dc4e..dbe99d635c80 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.c +++ b/virt/kvm/arm/vgic/vgic-mmio.c | |||
@@ -113,9 +113,12 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu, | |||
113 | /* Loop over all IRQs affected by this read */ | 113 | /* Loop over all IRQs affected by this read */ |
114 | for (i = 0; i < len * 8; i++) { | 114 | for (i = 0; i < len * 8; i++) { |
115 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 115 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
116 | unsigned long flags; | ||
116 | 117 | ||
118 | spin_lock_irqsave(&irq->irq_lock, flags); | ||
117 | if (irq_is_pending(irq)) | 119 | if (irq_is_pending(irq)) |
118 | value |= (1U << i); | 120 | value |= (1U << i); |
121 | spin_unlock_irqrestore(&irq->irq_lock, flags); | ||
119 | 122 | ||
120 | vgic_put_irq(vcpu->kvm, irq); | 123 | vgic_put_irq(vcpu->kvm, irq); |
121 | } | 124 | } |
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c index c32d7b93ffd1..29556f71b691 100644 --- a/virt/kvm/arm/vgic/vgic-v2.c +++ b/virt/kvm/arm/vgic/vgic-v2.c | |||
@@ -37,6 +37,13 @@ void vgic_v2_init_lrs(void) | |||
37 | vgic_v2_write_lr(i, 0); | 37 | vgic_v2_write_lr(i, 0); |
38 | } | 38 | } |
39 | 39 | ||
40 | void vgic_v2_set_npie(struct kvm_vcpu *vcpu) | ||
41 | { | ||
42 | struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2; | ||
43 | |||
44 | cpuif->vgic_hcr |= GICH_HCR_NPIE; | ||
45 | } | ||
46 | |||
40 | void vgic_v2_set_underflow(struct kvm_vcpu *vcpu) | 47 | void vgic_v2_set_underflow(struct kvm_vcpu *vcpu) |
41 | { | 48 | { |
42 | struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2; | 49 | struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2; |
@@ -64,7 +71,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu) | |||
64 | int lr; | 71 | int lr; |
65 | unsigned long flags; | 72 | unsigned long flags; |
66 | 73 | ||
67 | cpuif->vgic_hcr &= ~GICH_HCR_UIE; | 74 | cpuif->vgic_hcr &= ~(GICH_HCR_UIE | GICH_HCR_NPIE); |
68 | 75 | ||
69 | for (lr = 0; lr < vgic_cpu->used_lrs; lr++) { | 76 | for (lr = 0; lr < vgic_cpu->used_lrs; lr++) { |
70 | u32 val = cpuif->vgic_lr[lr]; | 77 | u32 val = cpuif->vgic_lr[lr]; |
@@ -410,7 +417,7 @@ int vgic_v2_probe(const struct gic_kvm_info *info) | |||
410 | kvm_vgic_global_state.type = VGIC_V2; | 417 | kvm_vgic_global_state.type = VGIC_V2; |
411 | kvm_vgic_global_state.max_gic_vcpus = VGIC_V2_MAX_CPUS; | 418 | kvm_vgic_global_state.max_gic_vcpus = VGIC_V2_MAX_CPUS; |
412 | 419 | ||
413 | kvm_info("vgic-v2@%llx\n", info->vctrl.start); | 420 | kvm_debug("vgic-v2@%llx\n", info->vctrl.start); |
414 | 421 | ||
415 | return 0; | 422 | return 0; |
416 | out: | 423 | out: |
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c index 6b329414e57a..0ff2006f3781 100644 --- a/virt/kvm/arm/vgic/vgic-v3.c +++ b/virt/kvm/arm/vgic/vgic-v3.c | |||
@@ -26,6 +26,13 @@ static bool group1_trap; | |||
26 | static bool common_trap; | 26 | static bool common_trap; |
27 | static bool gicv4_enable; | 27 | static bool gicv4_enable; |
28 | 28 | ||
29 | void vgic_v3_set_npie(struct kvm_vcpu *vcpu) | ||
30 | { | ||
31 | struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3; | ||
32 | |||
33 | cpuif->vgic_hcr |= ICH_HCR_NPIE; | ||
34 | } | ||
35 | |||
29 | void vgic_v3_set_underflow(struct kvm_vcpu *vcpu) | 36 | void vgic_v3_set_underflow(struct kvm_vcpu *vcpu) |
30 | { | 37 | { |
31 | struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3; | 38 | struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3; |
@@ -47,7 +54,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu) | |||
47 | int lr; | 54 | int lr; |
48 | unsigned long flags; | 55 | unsigned long flags; |
49 | 56 | ||
50 | cpuif->vgic_hcr &= ~ICH_HCR_UIE; | 57 | cpuif->vgic_hcr &= ~(ICH_HCR_UIE | ICH_HCR_NPIE); |
51 | 58 | ||
52 | for (lr = 0; lr < vgic_cpu->used_lrs; lr++) { | 59 | for (lr = 0; lr < vgic_cpu->used_lrs; lr++) { |
53 | u64 val = cpuif->vgic_lr[lr]; | 60 | u64 val = cpuif->vgic_lr[lr]; |
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c index c7c5ef190afa..8201899126f6 100644 --- a/virt/kvm/arm/vgic/vgic.c +++ b/virt/kvm/arm/vgic/vgic.c | |||
@@ -495,6 +495,32 @@ int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq, | |||
495 | return ret; | 495 | return ret; |
496 | } | 496 | } |
497 | 497 | ||
498 | /** | ||
499 | * kvm_vgic_reset_mapped_irq - Reset a mapped IRQ | ||
500 | * @vcpu: The VCPU pointer | ||
501 | * @vintid: The INTID of the interrupt | ||
502 | * | ||
503 | * Reset the active and pending states of a mapped interrupt. Kernel | ||
504 | * subsystems injecting mapped interrupts should reset their interrupt lines | ||
505 | * when we are doing a reset of the VM. | ||
506 | */ | ||
507 | void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid) | ||
508 | { | ||
509 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); | ||
510 | unsigned long flags; | ||
511 | |||
512 | if (!irq->hw) | ||
513 | goto out; | ||
514 | |||
515 | spin_lock_irqsave(&irq->irq_lock, flags); | ||
516 | irq->active = false; | ||
517 | irq->pending_latch = false; | ||
518 | irq->line_level = false; | ||
519 | spin_unlock_irqrestore(&irq->irq_lock, flags); | ||
520 | out: | ||
521 | vgic_put_irq(vcpu->kvm, irq); | ||
522 | } | ||
523 | |||
498 | int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid) | 524 | int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid) |
499 | { | 525 | { |
500 | struct vgic_irq *irq; | 526 | struct vgic_irq *irq; |
@@ -684,22 +710,37 @@ static inline void vgic_set_underflow(struct kvm_vcpu *vcpu) | |||
684 | vgic_v3_set_underflow(vcpu); | 710 | vgic_v3_set_underflow(vcpu); |
685 | } | 711 | } |
686 | 712 | ||
713 | static inline void vgic_set_npie(struct kvm_vcpu *vcpu) | ||
714 | { | ||
715 | if (kvm_vgic_global_state.type == VGIC_V2) | ||
716 | vgic_v2_set_npie(vcpu); | ||
717 | else | ||
718 | vgic_v3_set_npie(vcpu); | ||
719 | } | ||
720 | |||
687 | /* Requires the ap_list_lock to be held. */ | 721 | /* Requires the ap_list_lock to be held. */ |
688 | static int compute_ap_list_depth(struct kvm_vcpu *vcpu) | 722 | static int compute_ap_list_depth(struct kvm_vcpu *vcpu, |
723 | bool *multi_sgi) | ||
689 | { | 724 | { |
690 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; | 725 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; |
691 | struct vgic_irq *irq; | 726 | struct vgic_irq *irq; |
692 | int count = 0; | 727 | int count = 0; |
693 | 728 | ||
729 | *multi_sgi = false; | ||
730 | |||
694 | DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock)); | 731 | DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock)); |
695 | 732 | ||
696 | list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { | 733 | list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { |
697 | spin_lock(&irq->irq_lock); | 734 | spin_lock(&irq->irq_lock); |
698 | /* GICv2 SGIs can count for more than one... */ | 735 | /* GICv2 SGIs can count for more than one... */ |
699 | if (vgic_irq_is_sgi(irq->intid) && irq->source) | 736 | if (vgic_irq_is_sgi(irq->intid) && irq->source) { |
700 | count += hweight8(irq->source); | 737 | int w = hweight8(irq->source); |
701 | else | 738 | |
739 | count += w; | ||
740 | *multi_sgi |= (w > 1); | ||
741 | } else { | ||
702 | count++; | 742 | count++; |
743 | } | ||
703 | spin_unlock(&irq->irq_lock); | 744 | spin_unlock(&irq->irq_lock); |
704 | } | 745 | } |
705 | return count; | 746 | return count; |
@@ -710,28 +751,43 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu) | |||
710 | { | 751 | { |
711 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; | 752 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; |
712 | struct vgic_irq *irq; | 753 | struct vgic_irq *irq; |
713 | int count = 0; | 754 | int count; |
755 | bool npie = false; | ||
756 | bool multi_sgi; | ||
757 | u8 prio = 0xff; | ||
714 | 758 | ||
715 | DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock)); | 759 | DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock)); |
716 | 760 | ||
717 | if (compute_ap_list_depth(vcpu) > kvm_vgic_global_state.nr_lr) | 761 | count = compute_ap_list_depth(vcpu, &multi_sgi); |
762 | if (count > kvm_vgic_global_state.nr_lr || multi_sgi) | ||
718 | vgic_sort_ap_list(vcpu); | 763 | vgic_sort_ap_list(vcpu); |
719 | 764 | ||
765 | count = 0; | ||
766 | |||
720 | list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { | 767 | list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { |
721 | spin_lock(&irq->irq_lock); | 768 | spin_lock(&irq->irq_lock); |
722 | 769 | ||
723 | if (unlikely(vgic_target_oracle(irq) != vcpu)) | ||
724 | goto next; | ||
725 | |||
726 | /* | 770 | /* |
727 | * If we get an SGI with multiple sources, try to get | 771 | * If we have multi-SGIs in the pipeline, we need to |
728 | * them in all at once. | 772 | * guarantee that they are all seen before any IRQ of |
773 | * lower priority. In that case, we need to filter out | ||
774 | * these interrupts by exiting early. This is easy as | ||
775 | * the AP list has been sorted already. | ||
729 | */ | 776 | */ |
730 | do { | 777 | if (multi_sgi && irq->priority > prio) { |
778 | spin_unlock(&irq->irq_lock); | ||
779 | break; | ||
780 | } | ||
781 | |||
782 | if (likely(vgic_target_oracle(irq) == vcpu)) { | ||
731 | vgic_populate_lr(vcpu, irq, count++); | 783 | vgic_populate_lr(vcpu, irq, count++); |
732 | } while (irq->source && count < kvm_vgic_global_state.nr_lr); | ||
733 | 784 | ||
734 | next: | 785 | if (irq->source) { |
786 | npie = true; | ||
787 | prio = irq->priority; | ||
788 | } | ||
789 | } | ||
790 | |||
735 | spin_unlock(&irq->irq_lock); | 791 | spin_unlock(&irq->irq_lock); |
736 | 792 | ||
737 | if (count == kvm_vgic_global_state.nr_lr) { | 793 | if (count == kvm_vgic_global_state.nr_lr) { |
@@ -742,6 +798,9 @@ next: | |||
742 | } | 798 | } |
743 | } | 799 | } |
744 | 800 | ||
801 | if (npie) | ||
802 | vgic_set_npie(vcpu); | ||
803 | |||
745 | vcpu->arch.vgic_cpu.used_lrs = count; | 804 | vcpu->arch.vgic_cpu.used_lrs = count; |
746 | 805 | ||
747 | /* Nuke remaining LRs */ | 806 | /* Nuke remaining LRs */ |
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h index 12c37b89f7a3..f5b8519e5546 100644 --- a/virt/kvm/arm/vgic/vgic.h +++ b/virt/kvm/arm/vgic/vgic.h | |||
@@ -96,6 +96,7 @@ | |||
96 | /* we only support 64 kB translation table page size */ | 96 | /* we only support 64 kB translation table page size */ |
97 | #define KVM_ITS_L1E_ADDR_MASK GENMASK_ULL(51, 16) | 97 | #define KVM_ITS_L1E_ADDR_MASK GENMASK_ULL(51, 16) |
98 | 98 | ||
99 | /* Requires the irq_lock to be held by the caller. */ | ||
99 | static inline bool irq_is_pending(struct vgic_irq *irq) | 100 | static inline bool irq_is_pending(struct vgic_irq *irq) |
100 | { | 101 | { |
101 | if (irq->config == VGIC_CONFIG_EDGE) | 102 | if (irq->config == VGIC_CONFIG_EDGE) |
@@ -159,6 +160,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu); | |||
159 | void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr); | 160 | void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr); |
160 | void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr); | 161 | void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr); |
161 | void vgic_v2_set_underflow(struct kvm_vcpu *vcpu); | 162 | void vgic_v2_set_underflow(struct kvm_vcpu *vcpu); |
163 | void vgic_v2_set_npie(struct kvm_vcpu *vcpu); | ||
162 | int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr); | 164 | int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr); |
163 | int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write, | 165 | int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write, |
164 | int offset, u32 *val); | 166 | int offset, u32 *val); |
@@ -188,6 +190,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu); | |||
188 | void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr); | 190 | void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr); |
189 | void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr); | 191 | void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr); |
190 | void vgic_v3_set_underflow(struct kvm_vcpu *vcpu); | 192 | void vgic_v3_set_underflow(struct kvm_vcpu *vcpu); |
193 | void vgic_v3_set_npie(struct kvm_vcpu *vcpu); | ||
191 | void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); | 194 | void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); |
192 | void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); | 195 | void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); |
193 | void vgic_v3_enable(struct kvm_vcpu *vcpu); | 196 | void vgic_v3_enable(struct kvm_vcpu *vcpu); |
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 4501e658e8d6..65dea3ffef68 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
@@ -969,8 +969,7 @@ int __kvm_set_memory_region(struct kvm *kvm, | |||
969 | /* Check for overlaps */ | 969 | /* Check for overlaps */ |
970 | r = -EEXIST; | 970 | r = -EEXIST; |
971 | kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) { | 971 | kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) { |
972 | if ((slot->id >= KVM_USER_MEM_SLOTS) || | 972 | if (slot->id == id) |
973 | (slot->id == id)) | ||
974 | continue; | 973 | continue; |
975 | if (!((base_gfn + npages <= slot->base_gfn) || | 974 | if (!((base_gfn + npages <= slot->base_gfn) || |
976 | (base_gfn >= slot->base_gfn + slot->npages))) | 975 | (base_gfn >= slot->base_gfn + slot->npages))) |